From adc21144c04a0ade07fd660948bb8f390dc47578 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 10 Jun 2024 11:28:00 +0100 Subject: [PATCH 001/139] feat: gym wrapper --- mava/configs/arch/sebulba.yaml | 24 +++++++++ mava/utils/make_env.py | 28 +++++++++++ mava/wrappers/__init__.py | 1 + mava/wrappers/gym.py | 92 ++++++++++++++++++++++++++++++++++ requirements/requirements.txt | 1 + 5 files changed, 146 insertions(+) create mode 100644 mava/configs/arch/sebulba.yaml create mode 100644 mava/wrappers/gym.py diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml new file mode 100644 index 000000000..ed1d07dff --- /dev/null +++ b/mava/configs/arch/sebulba.yaml @@ -0,0 +1,24 @@ +# --- Sebulba config --- +arch_name: "sebulba" +num_envs: 16 # number of envs per thread + +# --- Evaluation --- +evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select + # an action which corresponds to the greatest logit. If false, the policy will sample + # from the logits. +num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. +num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. +absolute_metric: True # Whether the absolute metric should be computed. For more details + # on the absolute metric please see: https://arxiv.org/abs/2209.10485 + +# --- Sebulba devices config --- +n_threads_per_executor: 1 # num of different threads/env batches per actor +executor_device_ids: [0] # ids of actor devices +learner_device_ids: [0] # ids of learner devices + +# --- Sebulba rollout and env config --- +concurrency: False # whether actor and learner should run concurrently +async_envs: True # "whether to use async vector or sync vector envs" + +# --- To be defined during training --- +log_frequency: ~ \ No newline at end of file diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 39b348b40..c66d585f5 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -14,9 +14,11 @@ from typing import Tuple +import gym.vector import jaxmarl import jumanji import matrax +import gym from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment @@ -46,6 +48,7 @@ RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, + GymWrapper, ) # Registry mapping environment names to their generator and wrapper classes. @@ -198,6 +201,29 @@ def make_gigastep_env( train_env, eval_env = add_extra_wrappers(train_env, eval_env, config) return train_env, eval_env +def make_gym_env(env_name: str, config: DictConfig, add_global_state: bool = False): + """ + Create a Gym environment. + + Args: + env_name (str): The name of the environment to create. + config (Dict): The configuration of the environment. + add_global_state (bool): Whether to add the global state to the observation. Default False. + + Returns: + A tuple of the environments. + """ + def create_gym_env(config: DictConfig, add_global_state: bool = False, eval_env : bool = False): #todo: add the RecordEpisodeMetrics for gym. + env = gym.make(config.env.scenario) + wrapped_env = GymWrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) + if not config.env.implicit_agent_id: + pass #todo : add agent id wrapper for gym . 
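# Illustrative sketch only, not part of this patch: one shape the agent-ID wrapper mentioned in
# the todo above could take, assuming the wrapped env returns one observation per agent stacked
# along the first axis. The class name and one-hot layout are assumptions, not an existing Mava API.
import gym
import numpy as np

class GymAgentIDWrapper(gym.ObservationWrapper):
    """Appends a one-hot agent ID to each agent's observation."""

    def __init__(self, env: gym.Env):
        super().__init__(env)
        num_agents = len(env.action_space)
        self.agent_ids = np.eye(num_agents, dtype=np.float32)

    def observation(self, observation):
        # (num_agents, obs_dim) -> (num_agents, num_agents + obs_dim)
        obs = np.asarray(observation, dtype=np.float32)
        return np.concatenate([self.agent_ids, obs], axis=-1)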
+ return wrapped_env + + num_env = config.arch.num_envs + train_env = gym.vector.async_vector_env([create_gym_env(config, add_global_state) for _ in range(num_env)]) + eval_env = gym.vector.async_vector_env([create_gym_env(config, add_global_state, eval_env=True) for _ in range(num_env)]) + return train_env, eval_env def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environment, Environment]: """ @@ -220,5 +246,7 @@ def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environmen return make_matrax_env(env_name, config, add_global_state) elif env_name in _gigastep_registry: return make_gigastep_env(env_name, config, add_global_state) + elif env_name.startswith("gym"): + return make_gym_env(env_name, config, add_global_state) else: raise ValueError(f"{env_name} is not a supported environment.") diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 91bf7b4c4..7fd63ecbc 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -24,3 +24,4 @@ ) from mava.wrappers.matrax import MatraxWrapper from mava.wrappers.observation import AgentIDWrapper +from mava.wrappers.gym import GymWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py new file mode 100644 index 000000000..f1ea5004b --- /dev/null +++ b/mava/wrappers/gym.py @@ -0,0 +1,92 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gym +import numpy as np +from gym.spaces import Box, MultiDiscrete +from typing import TYPE_CHECKING, Dict, Tuple, Union + + +class GymWrapper(gym.Wrapper): + """Wrapper for gym environments""" + + def __init__(self, env: gym.env, use_individual_rewards : bool = False,add_global_state : bool = False, eval_env : bool = False): + """Initialize the gym wrapper + + Args: + env (gym.env): gym env instance. + use_individual_rewards (bool, optional): Use individual or group rewards. Defaults to False. + add_global_state (bool, optional) : Create global observations. Defaults to False. + eval_env (bool, optional): Weather the instance is used for training or evaluation. Defaults to False. + """ + super().__init__(env) + self._env = env + self.use_individual_rewards = use_individual_rewards + self.add_global_state = add_global_state #todo : add the global observations + self.eval_env = eval_env + self.num_agents = self._env.n_agents + self.num_actions = self._env.action_space[0].n #todo: all the agents must have the same num_actions, add assertion? + + def reset(self): + + obs, extra = self._env.reset(seed = np.random.randint(), option = {}) #todo: assure reproducibility + reward = np.zeros(self._env.n_agents) + terminated, truncated = np.zeros(self._env.n_agents , dtype=bool), np.zeros(self._env.n_agents , dtype=bool) + actions_mask = self._get_actions_mask(extra) + + + return np.array(obs), actions_mask, reward, terminated, truncated, extra + + def step(self , actions : np.array): + + if self._reset_next_step and not self.eval_env: #only auto-reset in training envs. 
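# Note, not part of this patch: `_reset_next_step` is never assigned in this wrapper and
# `gym.Wrapper` does not define it, so this guard relies on the wrapped env exposing such an
# attribute; later commits in this series drop the check and auto-reset from the done flags
# of the current step instead.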
+ return self.reset() + + obs, reward, terminated, truncated, extra = self.env.step(actions) + + terminated, truncated = np.array(terminated), np.array(truncated) + + done = np.logical_or(terminated, truncated).all() + + if done and not self.eval_env: #only auto-reset in training envs, same functionality as the AutoResetWrapper. + return self.reset() + + actions_mask = self._get_actions_mask(extra) + + + + if self.use_individual_rewards: + reward = np.array(reward) + else: + reward = np.array([np.array(reward).mean()] * self.num_agents) + + return np.array(obs), actions_mask, reward, terminated, truncated, extra + + + def _get_actions_mask(self, extra : Dict) -> np.array: + if "action_mask" in extra: + return np.array(extra["action_mask"]) + return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + + + + + + + + + + + + \ No newline at end of file diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 5efd3bbe1..88c61ce0f 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -21,3 +21,4 @@ scipy==1.12.0 tensorboard_logger tensorflow_probability type_enforced # needed because gigastep is missing this dependency +rware @ git+https://github.com/RuanJohn/robotic-warehouse.git \ No newline at end of file From ce86d096060f8fad5e4ef1ddd587cc33b06da692 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 10 Jun 2024 11:54:24 +0100 Subject: [PATCH 002/139] chore : pre-commit hooks --- mava/configs/arch/sebulba.yaml | 2 +- mava/utils/make_env.py | 27 +++++++--- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 94 +++++++++++++++++----------------- requirements/requirements.txt | 2 +- 5 files changed, 69 insertions(+), 58 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index ed1d07dff..98cd4d96d 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -21,4 +21,4 @@ concurrency: False # whether actor and learner should run concurrently async_envs: True # "whether to use async vector or sync vector envs" # --- To be defined during training --- -log_frequency: ~ \ No newline at end of file +log_frequency: ~ diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index c66d585f5..44758b41d 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -14,11 +14,11 @@ from typing import Tuple +import gym import gym.vector import jaxmarl import jumanji import matrax -import gym from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment @@ -42,13 +42,13 @@ CleanerWrapper, ConnectorWrapper, GigastepWrapper, + GymWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, - GymWrapper, ) # Registry mapping environment names to their generator and wrapper classes. @@ -201,7 +201,10 @@ def make_gigastep_env( train_env, eval_env = add_extra_wrappers(train_env, eval_env, config) return train_env, eval_env -def make_gym_env(env_name: str, config: DictConfig, add_global_state: bool = False): + +def make_gym_env( + env_name: str, config: DictConfig, add_global_state: bool = False +) -> Tuple[Environment, Environment]: #todo : create the appropriate annotation for the sync vector """ Create a Gym environment. @@ -213,18 +216,26 @@ def make_gym_env(env_name: str, config: DictConfig, add_global_state: bool = Fal Returns: A tuple of the environments. 
""" - def create_gym_env(config: DictConfig, add_global_state: bool = False, eval_env : bool = False): #todo: add the RecordEpisodeMetrics for gym. + + def create_gym_env( + config: DictConfig, add_global_state: bool = False, eval_env: bool = False + ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) wrapped_env = GymWrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: - pass #todo : add agent id wrapper for gym . + pass # todo : add agent id wrapper for gym . return wrapped_env - + num_env = config.arch.num_envs - train_env = gym.vector.async_vector_env([create_gym_env(config, add_global_state) for _ in range(num_env)]) - eval_env = gym.vector.async_vector_env([create_gym_env(config, add_global_state, eval_env=True) for _ in range(num_env)]) + train_env = gym.vector.async_vector_env( + [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)] + ) + eval_env = gym.vector.async_vector_env( + [create_gym_env(config, add_global_state, eval_env=True) for _ in range(num_env)] + ) return train_env, eval_env + def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environment, Environment]: """ Create environments for training and evaluation.. diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 7fd63ecbc..14a679cac 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,6 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper +from mava.wrappers.gym import GymWrapper from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, @@ -24,4 +25,3 @@ ) from mava.wrappers.matrax import MatraxWrapper from mava.wrappers.observation import AgentIDWrapper -from mava.wrappers.gym import GymWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index f1ea5004b..9c4d8b74d 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -12,81 +12,81 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, Tuple + import gym import numpy as np -from gym.spaces import Box, MultiDiscrete -from typing import TYPE_CHECKING, Dict, Tuple, Union +from numpy.typing import NDArray class GymWrapper(gym.Wrapper): """Wrapper for gym environments""" - - def __init__(self, env: gym.env, use_individual_rewards : bool = False,add_global_state : bool = False, eval_env : bool = False): + + def __init__( + self, + env: gym.env, + use_individual_rewards: bool = False, + add_global_state: bool = False, + eval_env: bool = False, + ): """Initialize the gym wrapper Args: env (gym.env): gym env instance. - use_individual_rewards (bool, optional): Use individual or group rewards. Defaults to False. + use_individual_rewards (bool, optional): Use individual or group rewards. + Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. - eval_env (bool, optional): Weather the instance is used for training or evaluation. Defaults to False. + eval_env (bool, optional): Weather the instance is used for training or evaluation. + Defaults to False. 
""" super().__init__(env) self._env = env self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state #todo : add the global observations + self.add_global_state = add_global_state # todo : add the global observations self.eval_env = eval_env self.num_agents = self._env.n_agents - self.num_actions = self._env.action_space[0].n #todo: all the agents must have the same num_actions, add assertion? - - def reset(self): - - obs, extra = self._env.reset(seed = np.random.randint(), option = {}) #todo: assure reproducibility + self.num_actions = self._env.action_space[ + 0 + ].n # todo: all the agents must have the same num_actions, add assertion? + + def reset(self) -> Tuple: + obs, extra = self._env.reset( + seed=np.random.randint(1), option={} + ) # todo: assure reproducibility reward = np.zeros(self._env.n_agents) - terminated, truncated = np.zeros(self._env.n_agents , dtype=bool), np.zeros(self._env.n_agents , dtype=bool) + terminated, truncated = np.zeros(self._env.n_agents, dtype=bool), np.zeros( + self._env.n_agents, dtype=bool + ) actions_mask = self._get_actions_mask(extra) - - - return np.array(obs), actions_mask, reward, terminated, truncated, extra - - def step(self , actions : np.array): - - if self._reset_next_step and not self.eval_env: #only auto-reset in training envs. + + return np.array(obs), actions_mask, reward, terminated, truncated, extra + + def step(self, actions: NDArray) -> Tuple: + + if self._reset_next_step and not self.eval_env: # only auto-reset in training envs. return self.reset() - + obs, reward, terminated, truncated, extra = self.env.step(actions) - + terminated, truncated = np.array(terminated), np.array(truncated) - - done = np.logical_or(terminated, truncated).all() - - if done and not self.eval_env: #only auto-reset in training envs, same functionality as the AutoResetWrapper. + + done = np.logical_or(terminated, truncated).all() + + if ( + done and not self.eval_env + ): # only auto-reset in training envs, same functionality as the AutoResetWrapper. 
return self.reset() - + actions_mask = self._get_actions_mask(extra) - - if self.use_individual_rewards: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - - return np.array(obs), actions_mask, reward, terminated, truncated, extra - - - def _get_actions_mask(self, extra : Dict) -> np.array: + + return np.array(obs), actions_mask, reward, terminated, truncated, extra + + def _get_actions_mask(self, extra: Dict) -> NDArray: if "action_mask" in extra: - return np.array(extra["action_mask"]) + return np.array(extra["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - - - - - - - - - - - - \ No newline at end of file diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 88c61ce0f..3b3bc4c58 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -17,8 +17,8 @@ numpy omegaconf optax protobuf~=3.20 +rware @ git+https://github.com/RuanJohn/robotic-warehouse.git scipy==1.12.0 tensorboard_logger tensorflow_probability type_enforced # needed because gigastep is missing this dependency -rware @ git+https://github.com/RuanJohn/robotic-warehouse.git \ No newline at end of file From d5edf4540092e98c44832863950f23ef976a64b2 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 14 Jun 2024 12:00:56 +0100 Subject: [PATCH 003/139] fix: merged the observations and action mask --- mava/utils/make_env.py | 4 +++- mava/wrappers/gym.py | 20 ++++++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 44758b41d..22419a4bb 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -204,7 +204,9 @@ def make_gigastep_env( def make_gym_env( env_name: str, config: DictConfig, add_global_state: bool = False -) -> Tuple[Environment, Environment]: #todo : create the appropriate annotation for the sync vector +) -> Tuple[ + Environment, Environment +]: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 9c4d8b74d..f634dcc46 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -18,6 +18,8 @@ import numpy as np from numpy.typing import NDArray +from mava.types import Observation + class GymWrapper(gym.Wrapper): """Wrapper for gym environments""" @@ -48,9 +50,10 @@ def __init__( self.num_actions = self._env.action_space[ 0 ].n # todo: all the agents must have the same num_actions, add assertion? + self.step_count = 0 # todo : make sure this implementaion is correct def reset(self) -> Tuple: - obs, extra = self._env.reset( + agents_view, extra = self._env.reset( seed=np.random.randint(1), option={} ) # todo: assure reproducibility reward = np.zeros(self._env.n_agents) @@ -59,14 +62,19 @@ def reset(self) -> Tuple: ) actions_mask = self._get_actions_mask(extra) - return np.array(obs), actions_mask, reward, terminated, truncated, extra + obs = Observation( + agents_view=np.array(agents_view), action_mask=actions_mask, step_count=self.step_count + ) + + return obs, reward, terminated, truncated, extra def step(self, actions: NDArray) -> Tuple: + self.step_count += 1 if self._reset_next_step and not self.eval_env: # only auto-reset in training envs. 
return self.reset() - obs, reward, terminated, truncated, extra = self.env.step(actions) + agents_view, reward, terminated, truncated, extra = self.env.step(actions) terminated, truncated = np.array(terminated), np.array(truncated) @@ -84,7 +92,11 @@ def step(self, actions: NDArray) -> Tuple: else: reward = np.array([np.array(reward).mean()] * self.num_agents) - return np.array(obs), actions_mask, reward, terminated, truncated, extra + obs = Observation( + agents_view=np.array(agents_view), action_mask=actions_mask, step_count=self.step_count + ) + + return obs, reward, terminated, truncated, extra def _get_actions_mask(self, extra: Dict) -> NDArray: if "action_mask" in extra: From f891be555886f0a1ed415683bb499cf32605eb4c Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 14 Jun 2024 12:38:00 +0100 Subject: [PATCH 004/139] fix: Create the gym wrappers directly --- mava/utils/make_env.py | 14 +++++--------- mava/wrappers/gym.py | 3 ++- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 22419a4bb..ed4cec124 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -203,7 +203,7 @@ def make_gigastep_env( def make_gym_env( - env_name: str, config: DictConfig, add_global_state: bool = False + env_name: str, config: DictConfig, add_global_state: bool = False , eval_env : bool = False ) -> Tuple[ Environment, Environment ]: # todo : create the appropriate annotation for the sync vector @@ -229,13 +229,11 @@ def create_gym_env( return wrapped_env num_env = config.arch.num_envs - train_env = gym.vector.async_vector_env( - [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)] - ) - eval_env = gym.vector.async_vector_env( - [create_gym_env(config, add_global_state, eval_env=True) for _ in range(num_env)] + envs = gym.vector.async_vector_env( + [lambda: create_gym_env(config, add_global_state, eval_env=eval_env) for _ in range(num_env)] ) - return train_env, eval_env + + return envs def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environment, Environment]: @@ -259,7 +257,5 @@ def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environmen return make_matrax_env(env_name, config, add_global_state) elif env_name in _gigastep_registry: return make_gigastep_env(env_name, config, add_global_state) - elif env_name.startswith("gym"): - return make_gym_env(env_name, config, add_global_state) else: raise ValueError(f"{env_name} is not a supported environment.") diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index f634dcc46..2c06f7e86 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -71,7 +71,7 @@ def reset(self) -> Tuple: def step(self, actions: NDArray) -> Tuple: self.step_count += 1 - if self._reset_next_step and not self.eval_env: # only auto-reset in training envs. + if self._reset_next_step and not self.eval_env: # only auto-reset in training envs. 
todo: turn this into a sepreat wrapper return self.reset() agents_view, reward, terminated, truncated, extra = self.env.step(actions) @@ -102,3 +102,4 @@ def _get_actions_mask(self, extra: Dict) -> NDArray: if "action_mask" in extra: return np.array(extra["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + From 15f486709e6387dddce83900bed95b85521260e4 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 14 Jun 2024 12:39:10 +0100 Subject: [PATCH 005/139] chore: pre-commit --- mava/utils/make_env.py | 13 +++++++------ mava/wrappers/gym.py | 5 +++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index ed4cec124..01d2a2eb0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -203,10 +203,8 @@ def make_gigastep_env( def make_gym_env( - env_name: str, config: DictConfig, add_global_state: bool = False , eval_env : bool = False -) -> Tuple[ - Environment, Environment -]: # todo : create the appropriate annotation for the sync vector + env_name: str, config: DictConfig, add_global_state: bool = False, eval_env: bool = False +) -> Environment: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. @@ -230,9 +228,12 @@ def create_gym_env( num_env = config.arch.num_envs envs = gym.vector.async_vector_env( - [lambda: create_gym_env(config, add_global_state, eval_env=eval_env) for _ in range(num_env)] + [ + lambda: create_gym_env(config, add_global_state, eval_env=eval_env) + for _ in range(num_env) + ] ) - + return envs diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 2c06f7e86..0cbfbc751 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -71,7 +71,9 @@ def reset(self) -> Tuple: def step(self, actions: NDArray) -> Tuple: self.step_count += 1 - if self._reset_next_step and not self.eval_env: # only auto-reset in training envs. todo: turn this into a sepreat wrapper + if ( + self._reset_next_step and not self.eval_env + ): # only auto-reset in training envs. 
todo: turn this into a sepreat wrapper return self.reset() agents_view, reward, terminated, truncated, extra = self.env.step(actions) @@ -102,4 +104,3 @@ def _get_actions_mask(self, extra: Dict) -> NDArray: if "action_mask" in extra: return np.array(extra["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - From 82ea827e0e7cf0bcc8ab269877050064ca25b3b7 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 14 Jun 2024 12:47:54 +0100 Subject: [PATCH 006/139] fix: fixed the async env creation --- mava/utils/make_env.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 01d2a2eb0..d40249c54 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -227,7 +227,7 @@ def create_gym_env( return wrapped_env num_env = config.arch.num_envs - envs = gym.vector.async_vector_env( + envs = gym.vector.AsyncVectorEnv( [ lambda: create_gym_env(config, add_global_state, eval_env=eval_env) for _ in range(num_env) From 4e94df57880b4c6370e2da4489961e5339044eb8 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 14 Jun 2024 14:34:50 +0100 Subject: [PATCH 007/139] fix: gymV26 compatability wrapper --- mava/configs/env/gym.yaml | 21 +++++++++++++++++++++ mava/utils/make_env.py | 4 ++++ 2 files changed, 25 insertions(+) create mode 100644 mava/configs/env/gym.yaml diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml new file mode 100644 index 000000000..ad8d16b9a --- /dev/null +++ b/mava/configs/env/gym.yaml @@ -0,0 +1,21 @@ +# ---Environment Configs--- + +scenario: rware:rware-tiny-2ag-v1 # [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] + +env_name: RobotWarehouse # Used for logging purposes. + +# Defines the metric that will be used to evaluate the performance of the agent. +# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. +eval_metric: episode_return + +# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. +# This should not be changed. +implicit_agent_id: False +# Whether or not to log the winrate of this environment. This should not be changed as not all +# environments have a winrate metric. +log_win_rate: False + +use_individual_rewards: True + +kwargs: + time_limit: 500 diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index d40249c54..806883786 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -16,6 +16,7 @@ import gym import gym.vector +import gym.wrappers import jaxmarl import jumanji import matrax @@ -221,6 +222,9 @@ def create_gym_env( config: DictConfig, add_global_state: bool = False, eval_env: bool = False ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) + env = gym.wrappers.EnvCompatibility( + env + ) # todo: check if this will break if env is developed for v26 wrapped_env = GymWrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: pass # todo : add agent id wrapper for gym . 
From 8a86be98f4f422bfaa627d10eb27c88bb40557ae Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 15 Jun 2024 15:36:31 +0100 Subject: [PATCH 008/139] fix: various minor fixes --- mava/utils/make_env.py | 6 ++++-- mava/wrappers/gym.py | 14 +++++++------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 806883786..1515cca0c 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -17,6 +17,8 @@ import gym import gym.vector import gym.wrappers +import gym.wrappers +import gym.wrappers.compatibility import jaxmarl import jumanji import matrax @@ -204,7 +206,7 @@ def make_gigastep_env( def make_gym_env( - env_name: str, config: DictConfig, add_global_state: bool = False, eval_env: bool = False + config: DictConfig, add_global_state: bool = False, eval_env: bool = False ) -> Environment: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. @@ -222,7 +224,7 @@ def create_gym_env( config: DictConfig, add_global_state: bool = False, eval_env: bool = False ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) - env = gym.wrappers.EnvCompatibility( + env = gym.wrappers.compatibility.EnvCompatibility( env ) # todo: check if this will break if env is developed for v26 wrapped_env = GymWrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 0cbfbc751..99b56d621 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -26,7 +26,7 @@ class GymWrapper(gym.Wrapper): def __init__( self, - env: gym.env, + env: gym.Env, use_individual_rewards: bool = False, add_global_state: bool = False, eval_env: bool = False, @@ -46,7 +46,7 @@ def __init__( self.use_individual_rewards = use_individual_rewards self.add_global_state = add_global_state # todo : add the global observations self.eval_env = eval_env - self.num_agents = self._env.n_agents + self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[ 0 ].n # todo: all the agents must have the same num_actions, add assertion? 
@@ -54,11 +54,11 @@ def __init__( def reset(self) -> Tuple: agents_view, extra = self._env.reset( - seed=np.random.randint(1), option={} + seed=np.random.randint(1) ) # todo: assure reproducibility - reward = np.zeros(self._env.n_agents) - terminated, truncated = np.zeros(self._env.n_agents, dtype=bool), np.zeros( - self._env.n_agents, dtype=bool + reward = np.zeros(self.num_agents) + terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( + self.num_agents, dtype=bool ) actions_mask = self._get_actions_mask(extra) @@ -103,4 +103,4 @@ def step(self, actions: NDArray) -> Tuple: def _get_actions_mask(self, extra: Dict) -> NDArray: if "action_mask" in extra: return np.array(extra["action_mask"]) - return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + return np.ones((self.num_agents, self.num_actions), dtype=np.float32) \ No newline at end of file From 1da5c15b13c74c8286819cca9b36277bf8030a27 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 15 Jun 2024 16:09:16 +0100 Subject: [PATCH 009/139] fix: handling rware reset function --- mava/utils/make_env.py | 2 +- mava/wrappers/gym.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 1515cca0c..1e2721dc6 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -226,7 +226,7 @@ def create_gym_env( env = gym.make(config.env.scenario) env = gym.wrappers.compatibility.EnvCompatibility( env - ) # todo: check if this will break if env is developed for v26 + ) wrapped_env = GymWrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: pass # todo : add agent id wrapper for gym . diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 99b56d621..fff21a899 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -53,9 +53,9 @@ def __init__( self.step_count = 0 # todo : make sure this implementaion is correct def reset(self) -> Tuple: - agents_view, extra = self._env.reset( + (agents_view, extra), _ = self._env.reset( seed=np.random.randint(1) - ) # todo: assure reproducibility + ) # todo: assure reproducibility, this only works for rware reward = np.zeros(self.num_agents) terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( self.num_agents, dtype=bool From 4466044d07541fb3e48b56f42c26be2a235a3e31 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sun, 16 Jun 2024 18:58:27 +0100 Subject: [PATCH 010/139] feat: async env wrapper , changed the gym wrapper to rware wrapper --- mava/configs/default_ff_ippo_seb.yaml | 7 +++ mava/systems/sebulba/ppo/test.py | 50 ++++++++++++++++++ mava/utils/make_env.py | 19 ++++--- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 75 +++++++++++++++++---------- 5 files changed, 117 insertions(+), 36 deletions(-) create mode 100644 mava/configs/default_ff_ippo_seb.yaml create mode 100644 mava/systems/sebulba/ppo/test.py diff --git a/mava/configs/default_ff_ippo_seb.yaml b/mava/configs/default_ff_ippo_seb.yaml new file mode 100644 index 000000000..1002d90c4 --- /dev/null +++ b/mava/configs/default_ff_ippo_seb.yaml @@ -0,0 +1,7 @@ +defaults: + - logger: ff_ippo + - arch: sebulba + - system: ppo/ff_ippo + - network: mlp + - env: gym + - _self_ diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py new file mode 100644 index 000000000..b868f69b6 --- /dev/null +++ b/mava/systems/sebulba/ppo/test.py @@ -0,0 +1,50 @@ + +import copy +import time +from typing import Any, Dict, Tuple, List 
+import threading +import chex +import flax +import hydra +import jax +import jax.numpy as jnp +import numpy as np +import optax +import queue +from collections import deque +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict +from omegaconf import DictConfig, OmegaConf +from optax._src.base import OptState +from rich.pretty import pprint + +from mava.evaluator import make_eval_fns +from mava.networks import FeedForwardActor as Actor +from mava.networks import FeedForwardValueNet as Critic +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this +from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + merge_leading_dims, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + + env = environments.make_gym_env(cfg) + a = env.reset() + print(a) + +if __name__ == "__main__": + hydra_entry_point() \ No newline at end of file diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 1e2721dc6..61b379fd7 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -45,7 +45,8 @@ CleanerWrapper, ConnectorWrapper, GigastepWrapper, - GymWrapper, + GymRwareWrapper, + AsyncGymWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -69,6 +70,8 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} +_gym_registry = {"rware" : GymRwareWrapper} + def add_extra_wrappers( train_env: Environment, eval_env: Environment, config: DictConfig @@ -219,27 +222,27 @@ def make_gym_env( Returns: A tuple of the environments. """ + base_env_name = config.env.scenario.split(":")[0] + wrapper = _gym_registry[base_env_name] def create_gym_env( config: DictConfig, add_global_state: bool = False, eval_env: bool = False ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) - env = gym.wrappers.compatibility.EnvCompatibility( - env - ) - wrapped_env = GymWrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) + _gym_registry + wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: pass # todo : add agent id wrapper for gym . 
return wrapped_env - num_env = config.arch.num_envs - envs = gym.vector.AsyncVectorEnv( + num_env = config.arch.num_envs + envs = gym.vector.AsyncVectorEnv( #todo : give them more descriptive names [ lambda: create_gym_env(config, add_global_state, eval_env=eval_env) for _ in range(num_env) ] ) - + envs = AsyncGymWrapper(envs) return envs diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 14a679cac..6210ca6ed 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymWrapper +from mava.wrappers.gym import GymRwareWrapper, AsyncGymWrapper from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index fff21a899..bc71e3e81 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -21,8 +21,8 @@ from mava.types import Observation -class GymWrapper(gym.Wrapper): - """Wrapper for gym environments""" +class GymRwareWrapper(gym.Wrapper): + """Wrapper for rware gym environments""" def __init__( self, @@ -42,7 +42,7 @@ def __init__( Defaults to False. """ super().__init__(env) - self._env = env + self._env = gym.wrappers.compatibility.EnvCompatibility(env) self.use_individual_rewards = use_individual_rewards self.add_global_state = add_global_state # todo : add the global observations self.eval_env = eval_env @@ -50,33 +50,29 @@ def __init__( self.num_actions = self._env.action_space[ 0 ].n # todo: all the agents must have the same num_actions, add assertion? - self.step_count = 0 # todo : make sure this implementaion is correct def reset(self) -> Tuple: - (agents_view, extra), _ = self._env.reset( + (agents_view, info), _ = self._env.reset( seed=np.random.randint(1) ) # todo: assure reproducibility, this only works for rware - reward = np.zeros(self.num_agents) - terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( - self.num_agents, dtype=bool - ) - actions_mask = self._get_actions_mask(extra) - - obs = Observation( - agents_view=np.array(agents_view), action_mask=actions_mask, step_count=self.step_count - ) - return obs, reward, terminated, truncated, extra + info["action_mask"] = self._get_actions_mask(info) + + return np.array(agents_view), info def step(self, actions: NDArray) -> Tuple: - self.step_count += 1 if ( self._reset_next_step and not self.eval_env ): # only auto-reset in training envs. todo: turn this into a sepreat wrapper - return self.reset() + agents_view, info = self.reset() + reward = np.zeros(self.num_agents) + terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( + self.num_agents, dtype=bool + ) + return agents_view, reward, terminated, truncated, info - agents_view, reward, terminated, truncated, extra = self.env.step(actions) + agents_view, reward, terminated, truncated, info = self.env.step(actions) terminated, truncated = np.array(terminated), np.array(truncated) @@ -87,20 +83,45 @@ def step(self, actions: NDArray) -> Tuple: ): # only auto-reset in training envs, same functionality as the AutoResetWrapper. 
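# Note, not part of this patch: at this point `reset` returns only `(agents_view, info)`, so
# falling through to `return self.reset()` gives the caller a 2-tuple where `step` otherwise
# returns a 5-tuple; the next commit rewrites this branch to build the full
# `(obs, reward, terminated, truncated, info)` tuple explicitly.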
return self.reset() - actions_mask = self._get_actions_mask(extra) + info["action_mask"] = self._get_actions_mask(info) if self.use_individual_rewards: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - obs = Observation( - agents_view=np.array(agents_view), action_mask=actions_mask, step_count=self.step_count - ) - return obs, reward, terminated, truncated, extra + return agents_view, reward, terminated, truncated, info - def _get_actions_mask(self, extra: Dict) -> NDArray: - if "action_mask" in extra: - return np.array(extra["action_mask"]) - return np.ones((self.num_agents, self.num_actions), dtype=np.float32) \ No newline at end of file + def _get_actions_mask(self, info: Dict) -> NDArray: + if "action_mask" in info: + return np.array(info["action_mask"]) + return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + + +class AsyncGymWrapper: + """Wrapper for async gym environments""" + + def __init__(self, env: gym.vector.AsyncVectorEnv): + self._env = env + self.step_count = 0 #todo : make sure this is implemented correctly + + def reset(self) -> Tuple[Observation, Dict]: + agents_view , info = self._env.reset() + obs = self._create_obs(agents_view, info) + return obs, info + + def step(self) -> Tuple[Observation, NDArray, NDArray, NDArray, Dict]: + + self.step_count += 1 + agents_view, reward, terminated, truncated, info = self._env.step() + obs = self._create_obs(agents_view, info) + + return obs, reward, terminated, truncated, info + + + def _create_obs(self, agents_view : NDArray, info: Dict) -> Observation: + """Create the observations""" + agents_view = np.array(agents_view) + return Observation(agents_view=agents_view, action_mask=info["action_mask"], step_count=self.step_count) + \ No newline at end of file From 24d8aaefb596904e5fd9e0be813947405a3ecdaa Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sun, 16 Jun 2024 22:43:55 +0100 Subject: [PATCH 011/139] fix: fixed the async env wrapper --- mava/wrappers/gym.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index bc71e3e81..2c6597830 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -62,26 +62,19 @@ def reset(self) -> Tuple: def step(self, actions: NDArray) -> Tuple: - if ( - self._reset_next_step and not self.eval_env - ): # only auto-reset in training envs. todo: turn this into a sepreat wrapper - agents_view, info = self.reset() - reward = np.zeros(self.num_agents) - terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( - self.num_agents, dtype=bool - ) - return agents_view, reward, terminated, truncated, info - agents_view, reward, terminated, truncated, info = self.env.step(actions) - terminated, truncated = np.array(terminated), np.array(truncated) - done = np.logical_or(terminated, truncated).all() if ( done and not self.eval_env ): # only auto-reset in training envs, same functionality as the AutoResetWrapper. 
- return self.reset() + agents_view, info = self.reset() + reward = np.zeros(self.num_agents) + terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( + self.num_agents, dtype=bool + ) + return agents_view, reward, terminated, truncated, info info["action_mask"] = self._get_actions_mask(info) @@ -99,22 +92,29 @@ def _get_actions_mask(self, info: Dict) -> NDArray: return np.ones((self.num_agents, self.num_actions), dtype=np.float32) -class AsyncGymWrapper: +class AsyncGymWrapper(gym.Wrapper): """Wrapper for async gym environments""" def __init__(self, env: gym.vector.AsyncVectorEnv): + super().__init__(env) self._env = env self.step_count = 0 #todo : make sure this is implemented correctly + action_space = env.single_action_space + self.num_agents = len(action_space) + self.num_actions = action_space[0].n + self.num_envs = env.num_envs + def reset(self) -> Tuple[Observation, Dict]: agents_view , info = self._env.reset() obs = self._create_obs(agents_view, info) - return obs, info + dones = np.zeros((self.num_envs, 1)) + return obs, dones, info - def step(self) -> Tuple[Observation, NDArray, NDArray, NDArray, Dict]: + def step(self, actions : NDArray) -> Tuple[Observation, NDArray, NDArray, NDArray, Dict]: self.step_count += 1 - agents_view, reward, terminated, truncated, info = self._env.step() + agents_view, reward, terminated, truncated, info = self._env.step(actions) obs = self._create_obs(agents_view, info) return obs, reward, terminated, truncated, info From a6deae270fbbd8bbb81c8fc507e5c974f10f66df Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 18 Jun 2024 16:24:16 +0100 Subject: [PATCH 012/139] fix: info only contains the action_mask and reformated (n_agents, n_env) ->(n_env, n_agents) --- mava/utils/make_env.py | 1 - mava/wrappers/gym.py | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 61b379fd7..7f5a5a0fb 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -229,7 +229,6 @@ def create_gym_env( config: DictConfig, add_global_state: bool = False, eval_env: bool = False ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) - _gym_registry wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: pass # todo : add agent id wrapper for gym . 
diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 2c6597830..be4fe40fc 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -56,7 +56,7 @@ def reset(self) -> Tuple: seed=np.random.randint(1) ) # todo: assure reproducibility, this only works for rware - info["action_mask"] = self._get_actions_mask(info) + info = {"action_mask" : self._get_actions_mask(info)} return np.array(agents_view), info @@ -76,7 +76,7 @@ def step(self, actions: NDArray) -> Tuple: ) return agents_view, reward, terminated, truncated, info - info["action_mask"] = self._get_actions_mask(info) + info = {"action_mask" : self._get_actions_mask(info)} if self.use_individual_rewards: reward = np.array(reward) @@ -108,20 +108,21 @@ def __init__(self, env: gym.vector.AsyncVectorEnv): def reset(self) -> Tuple[Observation, Dict]: agents_view , info = self._env.reset() obs = self._create_obs(agents_view, info) - dones = np.zeros((self.num_envs, 1)) + dones = np.zeros((self.num_envs, self.num_agents)) return obs, dones, info def step(self, actions : NDArray) -> Tuple[Observation, NDArray, NDArray, NDArray, Dict]: self.step_count += 1 + actions = actions.swapaxes(0,1) # num_env, num_ags --> num_ags, num_env as expected by the async env agents_view, reward, terminated, truncated, info = self._env.step(actions) obs = self._create_obs(agents_view, info) - - return obs, reward, terminated, truncated, info + dones = np.logical_or(terminated, truncated) + return obs, reward, dones, info def _create_obs(self, agents_view : NDArray, info: Dict) -> Observation: """Create the observations""" - agents_view = np.array(agents_view) - return Observation(agents_view=agents_view, action_mask=info["action_mask"], step_count=self.step_count) + agents_view = np.stack(agents_view, axis = 1) + return Observation(agents_view=agents_view, action_mask=np.stack(info["action_mask"], axis = 0), step_count=self.step_count) \ No newline at end of file From 1475bd0d7ae465dbdce4b86aa02d55df487ae588 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 22 Jun 2024 12:02:19 +0100 Subject: [PATCH 013/139] chore: removed async gym wrapper --- mava/systems/sebulba/ppo/types.py | 99 +++++++++++++++++++++++++++++++ mava/utils/make_env.py | 3 +- mava/wrappers/gym.py | 44 ++------------ 3 files changed, 106 insertions(+), 40 deletions(-) create mode 100644 mava/systems/sebulba/ppo/types.py diff --git a/mava/systems/sebulba/ppo/types.py b/mava/systems/sebulba/ppo/types.py new file mode 100644 index 000000000..13aeb58c1 --- /dev/null +++ b/mava/systems/sebulba/ppo/types.py @@ -0,0 +1,99 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Dict + +import chex +from flax.core.frozen_dict import FrozenDict +from jumanji.types import TimeStep +from optax._src.base import OptState +from typing_extensions import NamedTuple + +from mava.types import Action, Done, HiddenState, State, Value + + +class Params(NamedTuple): + """Parameters of an actor critic network.""" + + actor_params: FrozenDict + critic_params: FrozenDict + + +class OptStates(NamedTuple): + """OptStates of actor critic learner.""" + + actor_opt_state: OptState + critic_opt_state: OptState + + +class HiddenStates(NamedTuple): + """Hidden states for an actor critic learner.""" + + policy_hidden_state: HiddenState + critic_hidden_state: HiddenState + + +class LearnerState(NamedTuple): + """State of the learner.""" + + params: Params + opt_states: OptStates + key: chex.PRNGKey + env_state: State + timestep: TimeStep + + +class RNNLearnerState(NamedTuple): + """State of the `Learner` for recurrent architectures.""" + + params: Params + opt_states: OptStates + key: chex.PRNGKey + env_state: State + timestep: TimeStep + dones: Done + hstates: HiddenStates + + +class PPOTransition(NamedTuple): + """Transition tuple for PPO.""" + + done: Done + action: Action + value: Value + reward: chex.Array + log_prob: chex.Array + obs: chex.Array + info : Dict + +class RNNPPOTransition(NamedTuple): + """Transition tuple for PPO.""" + + done: Done + action: Action + value: Value + reward: chex.Array + log_prob: chex.Array + obs: chex.Array + hstates: HiddenStates + + +class Observation(NamedTuple): + """The observation that the agent sees. + agents_view: the agent's view of the environment. + action_mask: boolean array specifying, for each agent, which action is legal. + """ + + agents_view: chex.Array # (num_agents, num_obs_features) + action_mask: chex.Array # (num_agents, num_actions) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 7f5a5a0fb..8ee391f0c 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -46,7 +46,6 @@ ConnectorWrapper, GigastepWrapper, GymRwareWrapper, - AsyncGymWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -241,7 +240,7 @@ def create_gym_env( for _ in range(num_env) ] ) - envs = AsyncGymWrapper(envs) + return envs diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index be4fe40fc..f48c34fcf 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -17,10 +17,14 @@ import gym import numpy as np from numpy.typing import NDArray +import warnings from mava.types import Observation +# Filter out the warnings +warnings.filterwarnings('ignore', module='gym.utils.passive_env_checker') + class GymRwareWrapper(gym.Wrapper): """Wrapper for rware gym environments""" @@ -56,7 +60,7 @@ def reset(self) -> Tuple: seed=np.random.randint(1) ) # todo: assure reproducibility, this only works for rware - info = {"action_mask" : self._get_actions_mask(info)} + info = {"actions_mask" : self._get_actions_mask(info)} return np.array(agents_view), info @@ -76,7 +80,7 @@ def step(self, actions: NDArray) -> Tuple: ) return agents_view, reward, terminated, truncated, info - info = {"action_mask" : self._get_actions_mask(info)} + info = {"actions_mask" : self._get_actions_mask(info)} if self.use_individual_rewards: reward = np.array(reward) @@ -90,39 +94,3 @@ def _get_actions_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - - -class AsyncGymWrapper(gym.Wrapper): - """Wrapper for async gym 
environments""" - - def __init__(self, env: gym.vector.AsyncVectorEnv): - super().__init__(env) - self._env = env - self.step_count = 0 #todo : make sure this is implemented correctly - - action_space = env.single_action_space - self.num_agents = len(action_space) - self.num_actions = action_space[0].n - self.num_envs = env.num_envs - - def reset(self) -> Tuple[Observation, Dict]: - agents_view , info = self._env.reset() - obs = self._create_obs(agents_view, info) - dones = np.zeros((self.num_envs, self.num_agents)) - return obs, dones, info - - def step(self, actions : NDArray) -> Tuple[Observation, NDArray, NDArray, NDArray, Dict]: - - self.step_count += 1 - actions = actions.swapaxes(0,1) # num_env, num_ags --> num_ags, num_env as expected by the async env - agents_view, reward, terminated, truncated, info = self._env.step(actions) - obs = self._create_obs(agents_view, info) - dones = np.logical_or(terminated, truncated) - return obs, reward, dones, info - - - def _create_obs(self, agents_view : NDArray, info: Dict) -> Observation: - """Create the observations""" - agents_view = np.stack(agents_view, axis = 1) - return Observation(agents_view=agents_view, action_mask=np.stack(info["action_mask"], axis = 0), step_count=self.step_count) - \ No newline at end of file From 9fce9c6a463780103bd5e72279fb8e13121d5351 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 22 Jun 2024 14:08:15 +0100 Subject: [PATCH 014/139] feat: gym metric tracker wrapper --- mava/systems/sebulba/ppo/types.py | 3 +- mava/utils/make_env.py | 11 ++--- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 70 +++++++++++++++++++++++++++---- 4 files changed, 70 insertions(+), 16 deletions(-) diff --git a/mava/systems/sebulba/ppo/types.py b/mava/systems/sebulba/ppo/types.py index 13aeb58c1..6e02aa904 100644 --- a/mava/systems/sebulba/ppo/types.py +++ b/mava/systems/sebulba/ppo/types.py @@ -75,7 +75,8 @@ class PPOTransition(NamedTuple): reward: chex.Array log_prob: chex.Array obs: chex.Array - info : Dict + info: Dict + class RNNPPOTransition(NamedTuple): """Transition tuple for PPO.""" diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 8ee391f0c..69fc54623 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -17,7 +17,6 @@ import gym import gym.vector import gym.wrappers -import gym.wrappers import gym.wrappers.compatibility import jaxmarl import jumanji @@ -45,6 +44,7 @@ CleanerWrapper, ConnectorWrapper, GigastepWrapper, + GymRecordEpisodeMetrics, GymRwareWrapper, LbfWrapper, MabraxWrapper, @@ -69,7 +69,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"rware" : GymRwareWrapper} +_gym_registry = {"rware": GymRwareWrapper} def add_extra_wrappers( @@ -231,16 +231,17 @@ def create_gym_env( wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: pass # todo : add agent id wrapper for gym . 
+ env = GymRecordEpisodeMetrics(env) return wrapped_env - num_env = config.arch.num_envs - envs = gym.vector.AsyncVectorEnv( #todo : give them more descriptive names + num_env = config.arch.num_envs + envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names [ lambda: create_gym_env(config, add_global_state, eval_env=eval_env) for _ in range(num_env) ] ) - + return envs diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 6210ca6ed..e888d9317 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRwareWrapper, AsyncGymWrapper +from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index f48c34fcf..69632f1bc 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -12,18 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings from typing import Dict, Tuple import gym import numpy as np from numpy.typing import NDArray -import warnings - -from mava.types import Observation +# Filter out the warnings +warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") -# Filter out the warnings -warnings.filterwarnings('ignore', module='gym.utils.passive_env_checker') class GymRwareWrapper(gym.Wrapper): """Wrapper for rware gym environments""" @@ -60,8 +58,8 @@ def reset(self) -> Tuple: seed=np.random.randint(1) ) # todo: assure reproducibility, this only works for rware - info = {"actions_mask" : self._get_actions_mask(info)} - + info = {"actions_mask": self._get_actions_mask(info)} + return np.array(agents_view), info def step(self, actions: NDArray) -> Tuple: @@ -80,17 +78,71 @@ def step(self, actions: NDArray) -> Tuple: ) return agents_view, reward, terminated, truncated, info - info = {"actions_mask" : self._get_actions_mask(info)} + info = {"actions_mask": self._get_actions_mask(info)} if self.use_individual_rewards: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - return agents_view, reward, terminated, truncated, info def _get_actions_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + + +class GymRecordEpisodeMetrics(gym.Wrapper): + """Record the episode returns and lengths.""" + + def __init__(self, env: gym.Env): + super().__init__(env) + self.running_count_episode_return = 0.0 + self.running_count_episode_length = 0 + + def reset(self) -> Tuple: + + # Reset the env + agents_view, info = self.env.reset() + + # Reset the metrics + self.running_count_episode_return = 0.0 + self.running_count_episode_length = 0 + + # Create the metrics dict + metrics = { + "episode_return": self.running_count_episode_return, + "episode_length": self.self.running_count_episode_length, + "is_terminal_step": False, + } + if "won_episode" in info: + metrics["won_episode"] = info["won_episode"] + + return agents_view, metrics + + def step(self, actions: NDArray) -> Tuple: + + # Step the env + agents_view, reward, terminated, truncated, info = self.env.step(actions) + + # Update the metrics + done = 
np.logical_or(terminated, truncated).all() + + if not done: + self.running_count_episode_return += float(np.mean(reward)) + self.running_count_episode_length += 1 + + else: + self.running_count_episode_return = 0.0 + self.running_count_episode_length = 0 + + metrics = { + "episode_return": self.running_count_episode_return, + "episode_length": self.running_count_episode_length, + "is_terminal_step": False, + } + if "won_episode" in info: + metrics["won_episode"] = info["won_episode"] + + return agents_view, reward, terminated, truncated, metrics From 055a3266accb82a96808fa95762314dac45646d3 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 10 Jun 2024 20:12:16 +0100 Subject: [PATCH 015/139] feat: init sebulba ippo --- mava/systems/{ => anakin}/__init__.py | 0 mava/systems/{ => anakin}/ppo/__init__.py | 0 mava/systems/{ => anakin}/ppo/ff_ippo.py | 0 mava/systems/{ => anakin}/ppo/ff_mappo.py | 0 mava/systems/{ => anakin}/ppo/rec_ippo.py | 0 mava/systems/{ => anakin}/ppo/rec_mappo.py | 0 mava/systems/{ => anakin}/ppo/types.py | 0 .../{ => anakin}/q_learning/__init__.py | 0 .../{ => anakin}/q_learning/rec_iql.py | 0 mava/systems/{ => anakin}/q_learning/types.py | 0 mava/systems/{ => anakin}/sac/__init__.py | 0 mava/systems/{ => anakin}/sac/ff_isac.py | 0 mava/systems/{ => anakin}/sac/ff_masac.py | 0 mava/systems/{ => anakin}/sac/types.py | 0 mava/systems/sebulba/ppo/ff_ippo.py | 596 +++++++++++++ mava/systems/sebulba/ppo/orig.py | 796 ++++++++++++++++++ 16 files changed, 1392 insertions(+) rename mava/systems/{ => anakin}/__init__.py (100%) rename mava/systems/{ => anakin}/ppo/__init__.py (100%) rename mava/systems/{ => anakin}/ppo/ff_ippo.py (100%) rename mava/systems/{ => anakin}/ppo/ff_mappo.py (100%) rename mava/systems/{ => anakin}/ppo/rec_ippo.py (100%) rename mava/systems/{ => anakin}/ppo/rec_mappo.py (100%) rename mava/systems/{ => anakin}/ppo/types.py (100%) rename mava/systems/{ => anakin}/q_learning/__init__.py (100%) rename mava/systems/{ => anakin}/q_learning/rec_iql.py (100%) rename mava/systems/{ => anakin}/q_learning/types.py (100%) rename mava/systems/{ => anakin}/sac/__init__.py (100%) rename mava/systems/{ => anakin}/sac/ff_isac.py (100%) rename mava/systems/{ => anakin}/sac/ff_masac.py (100%) rename mava/systems/{ => anakin}/sac/types.py (100%) create mode 100644 mava/systems/sebulba/ppo/ff_ippo.py create mode 100644 mava/systems/sebulba/ppo/orig.py diff --git a/mava/systems/__init__.py b/mava/systems/anakin/__init__.py similarity index 100% rename from mava/systems/__init__.py rename to mava/systems/anakin/__init__.py diff --git a/mava/systems/ppo/__init__.py b/mava/systems/anakin/ppo/__init__.py similarity index 100% rename from mava/systems/ppo/__init__.py rename to mava/systems/anakin/ppo/__init__.py diff --git a/mava/systems/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py similarity index 100% rename from mava/systems/ppo/ff_ippo.py rename to mava/systems/anakin/ppo/ff_ippo.py diff --git a/mava/systems/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py similarity index 100% rename from mava/systems/ppo/ff_mappo.py rename to mava/systems/anakin/ppo/ff_mappo.py diff --git a/mava/systems/ppo/rec_ippo.py b/mava/systems/anakin/ppo/rec_ippo.py similarity index 100% rename from mava/systems/ppo/rec_ippo.py rename to mava/systems/anakin/ppo/rec_ippo.py diff --git a/mava/systems/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py similarity index 100% rename from mava/systems/ppo/rec_mappo.py rename to mava/systems/anakin/ppo/rec_mappo.py diff --git 
a/mava/systems/ppo/types.py b/mava/systems/anakin/ppo/types.py similarity index 100% rename from mava/systems/ppo/types.py rename to mava/systems/anakin/ppo/types.py diff --git a/mava/systems/q_learning/__init__.py b/mava/systems/anakin/q_learning/__init__.py similarity index 100% rename from mava/systems/q_learning/__init__.py rename to mava/systems/anakin/q_learning/__init__.py diff --git a/mava/systems/q_learning/rec_iql.py b/mava/systems/anakin/q_learning/rec_iql.py similarity index 100% rename from mava/systems/q_learning/rec_iql.py rename to mava/systems/anakin/q_learning/rec_iql.py diff --git a/mava/systems/q_learning/types.py b/mava/systems/anakin/q_learning/types.py similarity index 100% rename from mava/systems/q_learning/types.py rename to mava/systems/anakin/q_learning/types.py diff --git a/mava/systems/sac/__init__.py b/mava/systems/anakin/sac/__init__.py similarity index 100% rename from mava/systems/sac/__init__.py rename to mava/systems/anakin/sac/__init__.py diff --git a/mava/systems/sac/ff_isac.py b/mava/systems/anakin/sac/ff_isac.py similarity index 100% rename from mava/systems/sac/ff_isac.py rename to mava/systems/anakin/sac/ff_isac.py diff --git a/mava/systems/sac/ff_masac.py b/mava/systems/anakin/sac/ff_masac.py similarity index 100% rename from mava/systems/sac/ff_masac.py rename to mava/systems/anakin/sac/ff_masac.py diff --git a/mava/systems/sac/types.py b/mava/systems/anakin/sac/types.py similarity index 100% rename from mava/systems/sac/types.py rename to mava/systems/anakin/sac/types.py diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py new file mode 100644 index 000000000..c9a2069b2 --- /dev/null +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -0,0 +1,596 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import time +from typing import Any, Dict, Tuple + +import chex +import flax +import hydra +import jax +import jax.numpy as jnp +import optax +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict +from jumanji.env import Environment +from omegaconf import DictConfig, OmegaConf +from optax._src.base import OptState +from rich.pretty import pprint + +from mava.evaluator import make_eval_fns +from mava.networks import FeedForwardActor as Actor +from mava.networks import FeedForwardValueNet as Critic +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + merge_leading_dims, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +def get_learner_fn( + env: Environment, + apply_fns: Tuple[ActorApply, CriticApply], + update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], + config: DictConfig, +) -> LearnerFn[LearnerState]: + """Get the learner function.""" + + # Get apply and update functions for actor and critic networks. + actor_apply_fn, critic_apply_fn = apply_fns + actor_update_fn, critic_update_fn = update_fns + + def _update_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, Tuple]: + """A single update of the network. + + This function steps the environment and records the trajectory batch for + training. It then calculates advantages and targets based on the recorded + trajectory and updates the actor and critic networks based on the calculated + losses. + + Args: + learner_state (NamedTuple): + - params (Params): The current model parameters. + - opt_states (OptStates): The current optimizer states. + - key (PRNGKey): The random number generator state. + - env_state (State): The environment state. + - last_timestep (TimeStep): The last timestep in the current trajectory. + _ (Any): The current metrics info. 
+ """ + + def _env_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, PPOTransition]: + """Step the environment.""" + params, opt_states, key, env_state, last_timestep = learner_state + + # SELECT ACTION + key, policy_key = jax.random.split(key) + actor_policy = actor_apply_fn(params.actor_params, last_timestep.observation) + value = critic_apply_fn(params.critic_params, last_timestep.observation) + + action = actor_policy.sample(seed=policy_key) + log_prob = actor_policy.log_prob(action) + + # STEP ENVIRONMENT + env_state, timestep = jax.vmap(env.step, in_axes=(0, 0))(env_state, action) + + # LOG EPISODE METRICS + done = jax.tree_util.tree_map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + timestep.last(), + ) + info = timestep.extras["episode_metrics"] + + transition = PPOTransition( + done, action, value, timestep.reward, log_prob, last_timestep.observation, info + ) + learner_state = LearnerState(params, opt_states, key, env_state, timestep) + return learner_state, transition + + # STEP ENVIRONMENT FOR ROLLOUT LENGTH + learner_state, traj_batch = jax.lax.scan( + _env_step, learner_state, None, config.system.rollout_length + ) + + # CALCULATE ADVANTAGE + params, opt_states, key, env_state, last_timestep = learner_state + last_val = critic_apply_fn(params.critic_params, last_timestep.observation) + + def _calculate_gae( + traj_batch: PPOTransition, last_val: chex.Array + ) -> Tuple[chex.Array, chex.Array]: + """Calculate the GAE.""" + + def _get_advantages(gae_and_next_value: Tuple, transition: PPOTransition) -> Tuple: + """Calculate the GAE for a single transition.""" + gae, next_value = gae_and_next_value + done, value, reward = ( + transition.done, + transition.value, + transition.reward, + ) + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - done) * gae + return (gae, value), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(last_val), last_val), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + advantages, targets = _calculate_gae(traj_batch, last_val) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_states, key = train_state + traj_batch, advantages, targets = batch_info + + def _actor_loss_fn( + actor_params: FrozenDict, + actor_opt_state: OptState, + traj_batch: PPOTransition, + gae: chex.Array, + key: chex.PRNGKey, + ) -> Tuple: + """Calculate the actor loss.""" + # RERUN NETWORK + actor_policy = actor_apply_fn(actor_params, traj_batch.obs) + log_prob = actor_policy.log_prob(traj_batch.action) + + # CALCULATE ACTOR LOSS + ratio = jnp.exp(log_prob - traj_batch.log_prob) + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + loss_actor = loss_actor.mean() + # The seed will be used in the TanhTransformedDistribution: + entropy = actor_policy.entropy(seed=key).mean() + + total_loss_actor = loss_actor - config.system.ent_coef * entropy + return total_loss_actor, (loss_actor, entropy) + + def _critic_loss_fn( + critic_params: 
FrozenDict, + critic_opt_state: OptState, + traj_batch: PPOTransition, + targets: chex.Array, + ) -> Tuple: + """Calculate the critic loss.""" + # RERUN NETWORK + value = critic_apply_fn(critic_params, traj_batch.obs) + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + -config.system.clip_eps, config.system.clip_eps + ) + value_losses = jnp.square(value - targets) + value_losses_clipped = jnp.square(value_pred_clipped - targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + critic_total_loss = config.system.vf_coef * value_loss + return critic_total_loss, (value_loss) + + # CALCULATE ACTOR LOSS + key, entropy_key = jax.random.split(key) + actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) + actor_loss_info, actor_grads = actor_grad_fn( + params.actor_params, + opt_states.actor_opt_state, + traj_batch, + advantages, + entropy_key, + ) + + # CALCULATE CRITIC LOSS + critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) + critic_loss_info, critic_grads = critic_grad_fn( + params.critic_params, opt_states.critic_opt_state, traj_batch, targets + ) + + # Compute the parallel mean (pmean) over the batch. + # This calculation is inspired by the Anakin architecture demo notebook. + # available at https://tinyurl.com/26tdzs5x + # This pmean could be a regular mean as the batch axis is on the same device. + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="batch" + ) + # pmean over devices. + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="device" + ) + + critic_grads, critic_loss_info = jax.lax.pmean( + (critic_grads, critic_loss_info), axis_name="batch" + ) + # pmean over devices. 
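# Illustrative sketch, not part of the diff: how jax.lax.pmean averages gradients over a named
# axis, as done above for the "batch" (vmap) and "device" (pmap) axes; the axis name here is
# assumed to match the one passed to jax.pmap further down in this file.
import jax
import jax.numpy as jnp

per_device_grads = jnp.arange(jax.device_count(), dtype=jnp.float32)  # one gradient value per device
mean_fn = jax.pmap(lambda g: jax.lax.pmean(g, axis_name="device"), axis_name="device")
print(mean_fn(per_device_grads))  # every device now holds the same averaged value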
+ critic_grads, critic_loss_info = jax.lax.pmean( + (critic_grads, critic_loss_info), axis_name="device" + ) + + # UPDATE ACTOR PARAMS AND OPTIMISER STATE + actor_updates, actor_new_opt_state = actor_update_fn( + actor_grads, opt_states.actor_opt_state + ) + actor_new_params = optax.apply_updates(params.actor_params, actor_updates) + + # UPDATE CRITIC PARAMS AND OPTIMISER STATE + critic_updates, critic_new_opt_state = critic_update_fn( + critic_grads, opt_states.critic_opt_state + ) + critic_new_params = optax.apply_updates(params.critic_params, critic_updates) + + # PACK NEW PARAMS AND OPTIMISER STATE + new_params = Params(actor_new_params, critic_new_params) + new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) + + # PACK LOSS INFO + total_loss = actor_loss_info[0] + critic_loss_info[0] + value_loss = critic_loss_info[1] + actor_loss = actor_loss_info[1][0] + entropy = actor_loss_info[1][1] + loss_info = { + "total_loss": total_loss, + "value_loss": value_loss, + "actor_loss": actor_loss, + "entropy": entropy, + } + return (new_params, new_opt_state, entropy_key), loss_info + + params, opt_states, traj_batch, advantages, targets, key = update_state + key, shuffle_key, entropy_key = jax.random.split(key, 3) + + # SHUFFLE MINIBATCHES + batch_size = config.system.rollout_length * config.arch.num_envs + permutation = jax.random.permutation(shuffle_key, batch_size) + batch = (traj_batch, advantages, targets) + batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) + shuffled_batch = jax.tree_util.tree_map( + lambda x: jnp.take(x, permutation, axis=0), batch + ) + minibatches = jax.tree_util.tree_map( + lambda x: jnp.reshape(x, [config.system.num_minibatches, -1] + list(x.shape[1:])), + shuffled_batch, + ) + + # UPDATE MINIBATCHES + (params, opt_states, entropy_key), loss_info = jax.lax.scan( + _update_minibatch, (params, opt_states, entropy_key), minibatches + ) + + update_state = (params, opt_states, traj_batch, advantages, targets, key) + return update_state, loss_info + + update_state = (params, opt_states, traj_batch, advantages, targets, key) + + # UPDATE EPOCHS + update_state, loss_info = jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_states, traj_batch, advantages, targets, key = update_state + learner_state = LearnerState(params, opt_states, key, env_state, last_timestep) + metric = traj_batch.info + return learner_state, (metric, loss_info) + + def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: + """Learner function. + + This function represents the learner, it updates the network parameters + by iteratively applying the `_update_step` function for a fixed number of + updates. The `_update_step` function is vectorized over a batch of inputs. + + Args: + learner_state (NamedTuple): + - params (Params): The initial model parameters. + - opt_states (OptStates): The initial optimizer state. + - key (chex.PRNGKey): The random number generator state. + - env_state (LogEnvState): The environment state. + - timesteps (TimeStep): The initial timestep in the initial trajectory. 
+ """ + + batched_update_step = jax.vmap(_update_step, in_axes=(0, None), axis_name="batch") + + learner_state, (episode_info, loss_info) = jax.lax.scan( + batched_update_step, learner_state, None, config.system.num_updates_per_eval + ) + return ExperimentOutput( + learner_state=learner_state, + episode_metrics=episode_info, + train_metrics=loss_info, + ) + + return learner_fn + + +def learner_setup( + env: Environment, keys: chex.Array, config: DictConfig +) -> Tuple[LearnerFn[LearnerState], Actor, LearnerState]: + """Initialise learner_fn, network, optimiser, environment and states.""" + # Get available TPU cores. + devices = jax.devices() + learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] + n_devices = len(learner_devices) + + # Get number of agents. + config.system.num_agents = env.num_agents + + # PRNG keys. + key, actor_net_key, critic_net_key = keys + + # Define network and optimiser. + actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) + actor_action_head = hydra.utils.instantiate( + config.network.action_head, action_dim=env.action_dim + ) + critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) + + actor_network = Actor(torso=actor_torso, action_head=actor_action_head) + critic_network = Critic(torso=critic_torso) + + actor_lr = make_learning_rate(config.system.actor_lr, config) + critic_lr = make_learning_rate(config.system.critic_lr, config) + + actor_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(actor_lr, eps=1e-5), + ) + critic_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(critic_lr, eps=1e-5), + ) + + # Initialise observation with obs of all agents. + obs = env.single_observation_space.sample() + init_x = jax.tree_util.tree_map(lambda x: x[jnp.newaxis, ...], obs) + + # Initialise actor params and optimiser state. + actor_params = actor_network.init(actor_net_key, init_x) + actor_opt_state = actor_optim.init(actor_params) + + # Initialise critic params and optimiser state. + critic_params = critic_network.init(critic_net_key, init_x) + critic_opt_state = critic_optim.init(critic_params) + + # Pack params. + params = Params(actor_params, critic_params) + + # Pack apply and update functions. + apply_fns = (actor_network.apply, critic_network.apply) + update_fns = (actor_optim.update, critic_optim.update) + + # Get batched iterated update and replicate it to pmap it over cores. + learn = get_learner_fn(env, apply_fns, update_fns, config) + learn = jax.pmap(learn, axis_name="device", devices = learner_devices) + + # Initialise environment states and timesteps: across devices and batches. + key, *env_keys = jax.random.split( + key, n_devices * config.system.update_batch_size * config.arch.num_envs + 1 + ) + env_states, timesteps = jax.vmap(env.reset, in_axes=(0))( + jnp.stack(env_keys), + ) + reshape_states = lambda x: x.reshape( + (n_devices, config.system.update_batch_size, config.arch.num_envs) + x.shape[1:] + ) + # (devices, update batch size, num_envs, ...) + env_states = jax.tree_map(reshape_states, env_states) + timesteps = jax.tree_map(reshape_states, timesteps) + + # Load model from checkpoint if specified. 
+ if config.logger.checkpointing.load_model: + loaded_checkpoint = Checkpointer( + model_name=config.logger.system_name, + **config.logger.checkpointing.load_args, # Other checkpoint args + ) + # Restore the learner state from the checkpoint + restored_params, _ = loaded_checkpoint.restore_params(input_params=params) + # Update the params + params = restored_params + + # Define params to be replicated across devices and batches. + key, step_keys = jax.random.split(key) + opt_states = OptStates(actor_opt_state, critic_opt_state) + replicate_learner = (params, opt_states, step_keys) + + # Duplicate learner for update_batch_size. + broadcast = lambda x: jnp.broadcast_to(x, (config.system.update_batch_size,) + x.shape) + replicate_learner = jax.tree_map(broadcast, replicate_learner) + + # Duplicate learner across devices. + replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=jax.devices()) + + # Initialise learner state. + params, opt_states, step_keys = replicate_learner + init_learner_state = LearnerState(params, opt_states, step_keys, env_states, timesteps) + + return learn, actor_network, init_learner_state + + +def run_experiment(_config: DictConfig) -> float: + """Runs experiment.""" + config = copy.deepcopy(_config) + + n_devices = len(jax.devices()) + + # Create the enviroments for train and eval. + env, eval_env = environments.make(config) + + # PRNG keys. + key, key_e, actor_net_key, critic_net_key = jax.random.split( + jax.random.PRNGKey(config.system.seed), num=4 + ) + + # Setup learner. + learn, actor_network, learner_state = learner_setup( + env, (key, actor_net_key, critic_net_key), config + ) + + # Setup evaluator. + # One key per device for evaluation. + eval_keys = jax.random.split(key_e, n_devices) + evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) + + # Calculate total timesteps. + config = check_total_timesteps(config) + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." + + # Calculate number of updates per evaluation. + config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + steps_per_rollout = ( + n_devices + * config.system.num_updates_per_eval + * config.system.rollout_length + * config.system.update_batch_size + * config.arch.num_envs + ) + + # Logger setup + logger = MavaLogger(config) + cfg: Dict = OmegaConf.to_container(config, resolve=True) + cfg["arch"]["devices"] = jax.devices() + pprint(cfg) + + # Set up checkpointer + save_checkpoint = config.logger.checkpointing.save_model + if save_checkpoint: + checkpointer = Checkpointer( + metadata=config, # Save all config as metadata in the checkpoint + model_name=config.logger.system_name, + **config.logger.checkpointing.save_args, # Checkpoint args + ) + + # Run experiment for a total number of evaluations. + max_episode_return = -jnp.inf + best_params = None + for eval_step in range(config.arch.num_evaluation): + # Train. + start_time = time.time() + + learner_output = learn(learner_state) + jax.block_until_ready(learner_output) + + # Log the results of the training. + elapsed_time = time.time() - start_time + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + + # Separately log timesteps, actoring metrics and training metrics. 
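# Illustrative sketch, not part of the diff: the parameter layout built above, where a leading
# update_batch_size axis is broadcast in and flax.jax_utils.replicate then adds a leading device
# axis, so every leaf ends up with shape (n_devices, update_batch_size, ...). Toy shapes only.
import jax
import jax.numpy as jnp
import flax

update_batch_size = 2
params = {"w": jnp.ones((3,))}  # toy single-copy params
params = jax.tree_util.tree_map(
    lambda x: jnp.broadcast_to(x, (update_batch_size,) + x.shape), params
)
params = flax.jax_utils.replicate(params, devices=jax.devices())
print(params["w"].shape)  # (len(jax.devices()), 2, 3)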
+ logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) + if ep_completed: # only log episode metrics if an episode was completed in the rollout. + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + + # Prepare for evaluation. + start_time = time.time() + + trained_params = unreplicate_batch_dim(learner_state.params.actor_params) + key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) + eval_keys = jnp.stack(eval_keys) + eval_keys = eval_keys.reshape(n_devices, -1) + + # Evaluate. + evaluator_output = evaluator(trained_params, eval_keys) + jax.block_until_ready(evaluator_output) + + # Log the results of the evaluation. + elapsed_time = time.time() - start_time + episode_return = jnp.mean(evaluator_output.episode_metrics["episode_return"]) + + steps_per_eval = int(jnp.sum(evaluator_output.episode_metrics["episode_length"])) + evaluator_output.episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(evaluator_output.episode_metrics, t, eval_step, LogEvent.EVAL) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(trained_params) + max_episode_return = episode_return + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(evaluator_output.episode_metrics[config.env.eval_metric])) + + # Measure absolute metric. + if config.arch.absolute_metric: + start_time = time.time() + + key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) + eval_keys = jnp.stack(eval_keys) + eval_keys = eval_keys.reshape(n_devices, -1) + + evaluator_output = absolute_metric_evaluator(best_params, eval_keys) + jax.block_until_ready(evaluator_output) + + elapsed_time = time.time() - start_time + steps_per_eval = int(jnp.sum(evaluator_output.episode_metrics["episode_length"])) + t = int(steps_per_rollout * (eval_step + 1)) + evaluator_output.episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(evaluator_output.episode_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + + +@hydra.main(config_path="../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + + # Run experiment. + eval_performance = run_experiment(cfg) + print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() diff --git a/mava/systems/sebulba/ppo/orig.py b/mava/systems/sebulba/ppo/orig.py new file mode 100644 index 000000000..85b679305 --- /dev/null +++ b/mava/systems/sebulba/ppo/orig.py @@ -0,0 +1,796 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mava.utils.sebulba_utils import configure_computation_environment + +configure_computation_environment() # noqa: E402 + +import copy +import queue +import threading +import time +from collections import deque +from typing import Any, Dict, List, Tuple + +import chex +import flax +import hydra +import jax +import jax.numpy as jnp +import numpy as np +import optax +from chex import PRNGKey +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict +from omegaconf import DictConfig, OmegaConf +from rich.pretty import pprint + +from mava.evaluator import get_sebulba_ff_evaluator as evaluator_setup +from mava.logger import Logger +from mava.networks import get_networks +from mava.types import ( + ActorApply, + CriticApply, + LearnerState, + Observation, + OptStates, + Params, +) +from mava.types import PPOTransition as Transition +from mava.types import SebulbaLearnerFn as LearnerFn +from mava.types import SingleDeviceFn +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax import merge_leading_dims +from mava.utils.make_env import make + + +def rollout( # noqa: CCR001 + rng: PRNGKey, + config: DictConfig, + rollout_queue: queue.Queue, + params_queue: queue.Queue, + device_thread_id: int, + apply_fns: Tuple, + logger: Logger, + learner_devices: List, +) -> None: + """Executor rollout loop.""" + # Create envs + envs = make(config)(config.arch.num_envs) # type: ignore + + # Setup + len_executor_device_ids = len(config.arch.executor_device_ids) + t_env = 0 + start_time = time.time() + + # Get the apply functions for the actor and critic networks. + vmap_actor_apply, vmap_critic_apply = apply_fns + + # Define the util functions: select action function and prepare data to share it with learner. 
+ @jax.jit + def get_action_and_value( + params: FrozenDict, + observation: Observation, + key: PRNGKey, + ) -> Tuple: + """Get action and value.""" + key, subkey = jax.random.split(key) + + policy = vmap_actor_apply(params.actor_params, observation) + action, logprob = policy.sample_and_log_prob(seed=subkey) + + value = vmap_critic_apply(params.critic_params, observation).squeeze() + return action, logprob, value, key + + @jax.jit + def prepare_data(storage: List[Transition]) -> Transition: + """Prepare data to share with learner.""" + return jax.tree_map( # type: ignore + lambda *xs: jnp.split(jnp.stack(xs), len(learner_devices), axis=1), *storage + ) + + # Define the episode info + env_id = np.arange(config.arch.num_envs) + # Accumulated episode returns + episode_returns = np.zeros((config.arch.num_envs,), dtype=np.float32) + # Final episode returns + returned_episode_returns = np.zeros((config.arch.num_envs,), dtype=np.float32) + # Accumulated episode lengths + episode_lengths = np.zeros((config.arch.num_envs,), dtype=np.float32) + # Final episode lengths + returned_episode_lengths = np.zeros((config.arch.num_envs,), dtype=np.float32) + + # Define the data structure + params_queue_get_time: deque = deque(maxlen=10) + rollout_time: deque = deque(maxlen=10) + rollout_queue_put_time: deque = deque(maxlen=10) + + # Reset envs + next_obs, infos = envs.reset() + next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) + + # Loop till the learner has finished training + for update in range(1, config.system.num_updates + 2): + # Setup + env_recv_time: float = 0 + inference_time: float = 0 + storage_time: float = 0 + env_send_time: float = 0 + + # Get the latest parameters from the learner + params_queue_get_time_start = time.time() + if config.arch.concurrency: + if update != 2: + params = params_queue.get() + params.network_params["params"]["Dense_0"]["kernel"].block_until_ready() + else: + params = params_queue.get() + params_queue_get_time.append(time.time() - params_queue_get_time_start) + + # Rollout + rollout_time_start = time.time() + storage: List = [] + # Loop over the rollout length + for _ in range(0, config.system.rollout_length): + # Get previous step info + cached_next_obs = next_obs + cached_next_dones = next_dones + cashed_action_mask = np.stack(infos["actions_mask"]) + + # Increment current timestep + t_env += ( + config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs + ) + + # Get action and value + inference_time_start = time.time() + ( + action, + logprob, + value, + rng, + ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), rng) + inference_time += time.time() - inference_time_start + + # Step the environment + env_send_time_start = time.time() + cpu_action = np.array(action) + next_obs, next_reward, terminated, truncated, infos = envs.step(cpu_action) + next_done = terminated + truncated + next_dones = jax.tree_util.tree_map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + (next_done), + ) + + # Append data to storage + env_send_time += time.time() - env_send_time_start + storage_time_start = time.time() + storage.append( + Transition( + done=cached_next_dones, + action=action, + value=value, + reward=next_reward, + log_prob=logprob, + obs=cached_next_obs, + info=np.stack(infos["actions_mask"]), # Add action mask to info + ) + ) + storage_time += time.time() - storage_time_start + + # Update episode info + episode_returns[env_id] += 
np.mean(next_reward) + returned_episode_returns[env_id] = np.where( + next_done, + episode_returns[env_id], + returned_episode_returns[env_id], + ) + episode_returns[env_id] *= (1 - next_done) * (1 - truncated) + episode_lengths[env_id] += 1 + returned_episode_lengths[env_id] = np.where( + next_done, + episode_lengths[env_id], + returned_episode_lengths[env_id], + ) + episode_lengths[env_id] *= (1 - next_done) * (1 - truncated) + rollout_time.append(time.time() - rollout_time_start) + + # Prepare data to share with learner + partitioned_storage = prepare_data(storage) + sharded_storage = Transition( + *list( # noqa: C417 + map( + lambda x: jax.device_put_sharded(x, devices=learner_devices), # type: ignore + partitioned_storage, + ) + ) + ) + sharded_next_obs = jax.device_put_sharded( + np.split(next_obs, len(learner_devices)), devices=learner_devices + ) + sharded_next_done = jax.device_put_sharded( + np.split(next_dones, len(learner_devices)), devices=learner_devices + ) + sharded_next_action_mask = jax.device_put_sharded( + np.split(np.stack(infos["actions_mask"]), len(learner_devices)), devices=learner_devices + ) + payload = ( + t_env, + sharded_storage, + sharded_next_obs, + sharded_next_done, + sharded_next_action_mask, + np.mean(params_queue_get_time), + ) + + # Put data in the rollout queue to share it with the learner + rollout_queue_put_time_start = time.time() + rollout_queue.put(payload) + rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) + + if (update % config.arch.log_frequency == 0) or (config.system.num_updates + 1 == update): + # Log info + logger.log_executor_metrics( + t_env=t_env, + metrics={ + "episodes_info": { + "episode_return": returned_episode_returns, + "episode_length": returned_episode_lengths, + "steps_per_second": int(t_env / (time.time() - start_time)), + }, + "speed_info": { + "rollout_time": np.mean(rollout_time), + }, + "queue_info": { + "params_queue_get_time": np.mean(params_queue_get_time), + "env_recv_time": env_recv_time, + "inference_time": inference_time, + "storage_time": storage_time, + "env_send_time": env_send_time, + "rollout_queue_put_time": np.mean(rollout_queue_put_time), + }, + }, + device_thread_id=device_thread_id, + ) + + +def get_learner_fn( + apply_fns: Tuple[ActorApply, CriticApply], + update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], + config: DictConfig, +) -> LearnerFn: + """Get the learner function.""" + # Get apply and update functions for actor and critic networks. 
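# Illustrative sketch, not part of the diff: what prepare_data plus jax.device_put_sharded do
# above. Per-step arrays are stacked time-major, split along the env axis into one chunk per
# learner device, and placed on those devices; toy shapes, a single learner device assumed here.
import jax
import jax.numpy as jnp
import numpy as np

learner_devices = jax.local_devices()[:1]
storage = [np.ones((4, 2)) for _ in range(3)]              # 3 steps, 4 envs, 2 features per step
stacked = jnp.stack(storage)                               # (rollout_length, num_envs, 2)
chunks = jnp.split(stacked, len(learner_devices), axis=1)  # one env slice per learner device
sharded = jax.device_put_sharded(chunks, devices=learner_devices)
print(sharded.shape)  # (num_learner_devices, 3, 4 // num_learner_devices, 2)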
+ actor_apply_fn, critic_apply_fn = apply_fns + actor_update_fn, critic_update_fn = update_fns + + def single_device_update( + agents_state: LearnerState, + traj_batch: Transition, + last_observation: Observation, + rng: PRNGKey, + ) -> Tuple[LearnerState, chex.PRNGKey, Tuple]: + params, opt_states, _, _, _ = agents_state + + def _calculate_gae( + traj_batch: Transition, last_val: chex.Array + ) -> Tuple[chex.Array, chex.Array]: + """Calculate the GAE.""" + + def _get_advantages(gae_and_next_value: Tuple, transition: Transition) -> Tuple: + """Calculate the GAE for a single transition.""" + gae, next_value = gae_and_next_value + done, value, reward = ( + transition.done, + transition.value, + transition.reward, + ) + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - done) * gae + return (gae, value), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(last_val), last_val), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + # Calculate GAE + last_val = critic_apply_fn(params.critic_params, last_observation) + advantages, targets = _calculate_gae(traj_batch, last_val) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_states = train_state + traj_batch, advantages, targets = batch_info + + def _actor_loss_fn( + actor_params: FrozenDict, + actor_opt_state: OptStates, + traj_batch: Transition, + gae: chex.Array, + ) -> Tuple: + """Calculate the actor loss.""" + # RERUN NETWORK + actor_policy = actor_apply_fn(actor_params, traj_batch.obs) + log_prob = actor_policy.log_prob(traj_batch.action) + + # CALCULATE ACTOR LOSS + ratio = jnp.exp(log_prob - traj_batch.log_prob) + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + loss_actor = loss_actor.mean() + entropy = actor_policy.entropy().mean() + + total_loss_actor = loss_actor - config.system.ent_coef * entropy + return total_loss_actor, (loss_actor, entropy) + + def _critic_loss_fn( + critic_params: FrozenDict, + critic_opt_state: OptStates, + traj_batch: Transition, + targets: chex.Array, + ) -> Tuple: + """Calculate the critic loss.""" + # RERUN NETWORK + value = critic_apply_fn(critic_params, traj_batch.obs) + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + -config.system.clip_eps, config.system.clip_eps + ) + value_losses = jnp.square(value - targets) + value_losses_clipped = jnp.square(value_pred_clipped - targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + critic_total_loss = config.system.vf_coef * value_loss + return critic_total_loss, (value_loss) + + # CALCULATE ACTOR LOSS + actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) + actor_loss_info, actor_grads = actor_grad_fn( + params.actor_params, opt_states.actor_opt_state, traj_batch, advantages + ) + + # CALCULATE CRITIC LOSS + critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) + critic_loss_info, critic_grads = critic_grad_fn( + params.critic_params, 
opt_states.critic_opt_state, traj_batch, targets + ) + + # Compute the parallel mean (pmean) over the learner devices. + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="local_devices" + ) + critic_grads, critic_loss_info = jax.lax.pmean( + (critic_grads, critic_loss_info), axis_name="local_devices" + ) + + # UPDATE ACTOR PARAMS AND OPTIMISER STATE + actor_updates, actor_new_opt_state = actor_update_fn( + actor_grads, opt_states.actor_opt_state + ) + actor_new_params = optax.apply_updates(params.actor_params, actor_updates) + + # UPDATE CRITIC PARAMS AND OPTIMISER STATE + critic_updates, critic_new_opt_state = critic_update_fn( + critic_grads, opt_states.critic_opt_state + ) + critic_new_params = optax.apply_updates(params.critic_params, critic_updates) + + # PACK NEW PARAMS AND OPTIMISER STATE + new_params = Params(actor_new_params, critic_new_params) + new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) + + # PACK LOSS INFO + total_loss = actor_loss_info[0] + critic_loss_info[0] + value_loss = critic_loss_info[1] + actor_loss = actor_loss_info[1][0] + entropy = actor_loss_info[1][1] + loss_info = (total_loss, value_loss, actor_loss, entropy) + + return (new_params, new_opt_state), loss_info + + params, opt_states, traj_batch, advantages, targets, rng = update_state + rng, shuffle_rng = jax.random.split(rng) + + # SHUFFLE MINIBATCHES + batch_size = config.system.rollout_length * config.arch.num_envs + permutation = jax.random.permutation(shuffle_rng, batch_size) + batch = (traj_batch, advantages, targets) + batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) + shuffled_batch = jax.tree_util.tree_map( + lambda x: jnp.take(x, permutation, axis=0), batch + ) + minibatches = jax.tree_util.tree_map( + lambda x: jnp.reshape(x, [config.system.num_minibatches, -1] + list(x.shape[1:])), + shuffled_batch, + ) + + # UPDATE MINIBATCHES + (params, opt_states), loss_info = jax.lax.scan( + _update_minibatch, (params, opt_states), minibatches + ) + + update_state = (params, opt_states, traj_batch, advantages, targets, rng) + return update_state, loss_info + + update_state = (params, opt_states, traj_batch, advantages, targets, rng) + + # UPDATE EPOCHS + update_state, loss_info = jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_states, traj_batch, advantages, targets, rng = update_state + learner_state = agents_state._replace(params=params, opt_states=opt_states) + return learner_state, rng, loss_info + + def learner_fn( + agents_state: LearnerState, + sharded_storages: List, + sharded_next_obs: List, + sharded_next_done: List, + sharded_next_action_mask: List, + key: chex.PRNGKey, + ) -> Tuple: + """Single device update.""" + # Horizontal stack all the data from different devices + traj_batch = jax.tree_map(lambda *x: jnp.hstack(x), *sharded_storages) + traj_batch = traj_batch._replace(obs=Observation(traj_batch.obs, traj_batch.info)) + + # Get last observation + last_obs = jnp.concatenate(sharded_next_obs) + last_action_mask = jnp.concatenate(sharded_next_action_mask) + last_observation = Observation(last_obs, last_action_mask) + + # Update learner + agents_state, key, (total_loss, value_loss, actor_loss, entropy) = single_device_update( + agents_state, traj_batch, last_observation, key + ) + + # Pack loss info + loss_info = { + "total_loss": total_loss, + "loss_actor": actor_loss, + "value_loss": value_loss, + "entropy": entropy, + } + return agents_state, key, loss_info + + 
return learner_fn + + +def learner_setup( + rngs: chex.Array, config: DictConfig, learner_devices: List +) -> Tuple[SingleDeviceFn, LearnerState, Tuple[ActorApply, ActorApply]]: + """Initialise learner_fn, network, optimiser, environment and states.""" + # Get number of actions and agents. + dummy_envs = make(config)( # type: ignore + config.arch.num_envs # Create dummy_envs to get observation and action spaces + ) + config.system.num_agents = dummy_envs.single_observation_space.shape[0] + config.system.num_actions = int(dummy_envs.single_action_space.nvec[0]) + + # PRNG keys. + actor_net_key, critic_net_key = rngs + + # Define network and optimiser. + actor_network, critic_network = get_networks( + config=config, network="feedforward", centralised_critic=False + ) + actor_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(config.system.actor_lr, eps=1e-5), + ) + critic_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(config.system.critic_lr, eps=1e-5), + ) + + # Initialise observation: Select only obs for a single agent. + init_obs = np.array([dummy_envs.single_observation_space.sample()[0]]) + init_action_mask = np.ones((1, config.system.num_actions)) + init_x = Observation(init_obs, init_action_mask) + + # Initialise actor params and optimiser state. + actor_params = actor_network.init(actor_net_key, init_x) + actor_opt_state = actor_optim.init(actor_params) + + # Initialise critic params and optimiser state. + critic_params = critic_network.init(critic_net_key, init_x) + critic_opt_state = critic_optim.init(critic_params) + + # Vmap network apply function over number of agents. + vmapped_actor_network_apply_fn = jax.vmap( + actor_network.apply, + in_axes=(None, Observation(1, 1, None)), + out_axes=(1), + ) + vmapped_critic_network_apply_fn = jax.vmap( + critic_network.apply, + in_axes=(None, Observation(1, 1, None)), + out_axes=(1), + ) + + # Pack apply and update functions. + apply_fns = (vmapped_actor_network_apply_fn, vmapped_critic_network_apply_fn) + update_fns = (actor_optim.update, critic_optim.update) + + # Define agents state + agents_state = LearnerState( + params=Params( + actor_params=actor_params, + critic_params=critic_params, + ), + opt_states=OptStates( + actor_opt_state=actor_opt_state, + critic_opt_state=critic_opt_state, + ), + ) + # Replicate agents state per learner device + agents_state = flax.jax_utils.replicate(agents_state, devices=learner_devices) + + # Get Learner function: pmap over learner devices. + single_device_update = get_learner_fn(apply_fns, update_fns, config) + multi_device_update = jax.pmap( + single_device_update, + axis_name="local_devices", + devices=learner_devices, + ) + + # Close dummy envs. + dummy_envs.close() + + return multi_device_update, agents_state, apply_fns + + +def run_experiment(_config: DictConfig) -> None: # noqa: CCR001 + """Runs experiment.""" + config = copy.deepcopy(_config) + + # Setup device distribution. + local_devices = jax.local_devices() #why are we using local devices insted of devices? ------------------------------------------------------------------------------------------------------------------------------------ define a ratio insted of the devices to use? + learner_devices = [local_devices[d_id] for d_id in config.arch.learner_device_ids] + + # PRNG keys. 
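# Illustrative sketch, not part of the diff: vmapping a network apply over the agent axis with a
# pytree in_axes, as done for the actor/critic applies above (params not mapped, each observation
# field mapped over axis 1). Obs is a stand-in for Mava's Observation type.
from typing import NamedTuple
import jax
import jax.numpy as jnp

class Obs(NamedTuple):
    agents_view: jnp.ndarray  # (batch, num_agents, obs_dim)
    action_mask: jnp.ndarray  # (batch, num_agents, num_actions)

def apply(params: jnp.ndarray, obs: Obs) -> jnp.ndarray:
    return obs.agents_view @ params  # (batch, out_dim) for a single agent slice

params = jnp.ones((5, 3))
obs = Obs(jnp.ones((2, 4, 5)), jnp.ones((2, 4, 7)))
per_agent_apply = jax.vmap(apply, in_axes=(None, Obs(1, 1)), out_axes=1)
print(per_agent_apply(params, obs).shape)  # (2, 4, 3): batch, agents, out_dim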
+ rng, rng_e, actor_net_key, critic_net_key = jax.random.split( + jax.random.PRNGKey(config.system.seed), num=4 + ) + learner_keys = jax.device_put_replicated(rng, learner_devices) + + # Sanity check of config + assert ( + config.arch.num_envs % len(config.arch.learner_device_ids) == 0 + ), "local_num_envs must be divisible by len(learner_device_ids)" + #each thread is going to devide needs to give an equal number of traj to each learning device? shound't each actor Thread have a designated N learneres? If we have less actor T than learners then ech actor will devide based on the num_env and gives to N actors, ig to lessen the managment each actor gives to all of the learners? + #this deviates from the paper? + assert ( + int(config.arch.num_envs / len(config.arch.learner_device_ids)) + * config.arch.n_threads_per_executor + % config.system.num_minibatches + == 0 + ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" #this one makes sense but the assertion is a bit off? + + # Setup learner. + ( + multi_device_update, + agents_state, + apply_fns, + ) = learner_setup((actor_net_key, critic_net_key), config, learner_devices) + + # Setup evaluator. + eval_envs = make(config)(config.arch.num_eval_episodes) # type: ignore + evaluator = evaluator_setup(eval_envs=eval_envs, apply_fn=apply_fns[0], config=config) + + # Calculate total timesteps. + batch_size = int( + config.arch.num_envs + * config.system.rollout_length + * config.arch.n_threads_per_executor + * len(config.arch.executor_device_ids) + ) + config.system.total_timesteps = config.system.num_updates * batch_size + + # Setup logger. + config.arch.log_frequency = config.system.num_updates // config.arch.num_evaluation + logger = Logger(config) + cfg_dict: Dict = OmegaConf.to_container(config, resolve=True) + pprint(cfg_dict) + + # Set up checkpointer + save_checkpoint = config.logger.checkpointing.save_model + if save_checkpoint: + checkpointer = Checkpointer( + metadata=cfg_dict, # Save all config as metadata in the checkpoint + model_name=config.logger.system_name, + **config.logger.checkpointing.save_args, # Checkpoint args + ) + + if config.logger.checkpointing.load_model: + print( + f"{Fore.RED}{Style.BRIGHT}Loading checkpoint is not supported\ + for sebulba architecture yet{Style.RESET_ALL}" + ) + + # Executor setup and launch. + unreplicated_params = flax.jax_utils.unreplicate(agents_state.params) + params_queues: List = [] + rollout_queues: List = [] + for d_idx, d_id in enumerate( # Loop through each executor device + config.arch.executor_device_ids + ): + # Replicate params per executor device + device_params = jax.device_put(unreplicated_params, local_devices[d_id]) + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + params_queues.append(queue.Queue(maxsize=1)) + rollout_queues.append(queue.Queue(maxsize=1)) + params_queues[-1].put(device_params) + threading.Thread( + target=rollout, + args=( + jax.device_put(rng, local_devices[d_id]), + config, + rollout_queues[-1], + params_queues[-1], + d_idx * config.arch.n_threads_per_executor + thread_id, + apply_fns, + logger, + learner_devices, + ), + ).start() + + # Run experiment for the total number of updates. 
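# Illustrative sketch, not part of the diff: the executor/learner handshake wired up above, with
# one params queue and one rollout queue of maxsize=1 per executor thread. Strings stand in for
# the real params and trajectory payloads.
import queue
import threading

params_q: queue.Queue = queue.Queue(maxsize=1)
rollout_q: queue.Queue = queue.Queue(maxsize=1)

def executor_thread() -> None:
    for _ in range(3):
        params = params_q.get()                # block until the learner publishes params
        rollout_q.put(("trajectory", params))  # hand the collected rollout back

threading.Thread(target=executor_thread, daemon=True).start()
params_q.put("params_v0")                      # seed the first rollout
for update in range(3):
    payload = rollout_q.get()                  # learner consumes one rollout per update
    if update < 2:
        params_q.put(f"params_v{update + 1}")  # publish refreshed params for the next rollout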
+ rollout_queue_get_time: deque = deque(maxlen=10) + data_transfer_time: deque = deque(maxlen=10) + trainer_update_number = 0 + max_episode_return = jnp.float32(0.0) + best_params = None + while True: + trainer_update_number += 1 + rollout_queue_get_time_start = time.time() + sharded_storages = [] + sharded_next_obss = [] + sharded_next_dones = [] + sharded_next_action_masks = [] + + # Loop through each executor device + for d_idx, _ in enumerate(config.arch.executor_device_ids): + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + # Get data from rollout queue + ( + t_env, + sharded_storage, + sharded_next_obs, + sharded_next_done, + sharded_next_action_mask, + avg_params_queue_get_time, + ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() + sharded_storages.append(sharded_storage) + sharded_next_obss.append(sharded_next_obs) + sharded_next_dones.append(sharded_next_done) + sharded_next_action_masks.append(sharded_next_action_mask) + + rollout_queue_get_time.append(time.time() - rollout_queue_get_time_start) + training_time_start = time.time() + + # Update learner + (agents_state, learner_keys, loss_info) = multi_device_update( # type: ignore + agents_state, + sharded_storages, + sharded_next_obss, + sharded_next_dones, + sharded_next_action_masks, + learner_keys, + ) + + # Send updated params to executors + unreplicated_params = flax.jax_utils.unreplicate(agents_state.params) + for d_idx, d_id in enumerate(config.arch.executor_device_ids): + device_params = jax.device_put(unreplicated_params, local_devices[d_id]) + for thread_id in range(config.arch.n_threads_per_executor): + params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( + device_params + ) + + if trainer_update_number % config.arch.log_frequency == 0: + # Logging training info + logger.log_trainer_metrics( + experiment_output={ + "loss_info": loss_info, + "queue_info": { + "rollout_queue_get_time": np.mean(rollout_queue_get_time), + "data_transfer_time": np.mean(data_transfer_time), + "rollout_params_queue_get_time_diff": np.mean(rollout_queue_get_time) + - avg_params_queue_get_time, + "rollout_queue_size": rollout_queues[0].qsize(), + "params_queue_size": params_queues[0].qsize(), + }, + "speed_info": { + "training_time": time.time() - training_time_start, + "trainer_update_number": trainer_update_number, + }, + }, + t_env=t_env, + ) + + # Evaluation + rng_e, _ = jax.random.split(rng_e) + evaluator_output = evaluator(params=unreplicated_params, rng=rng_e) + # Log the results of the evaluation. + episode_return = logger.log_evaluator_metrics( + t_env=t_env, + metrics=evaluator_output, + eval_step=trainer_update_number, + ) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=t_env, + unreplicated_learner_state=flax.jax_utils.unreplicate(agents_state), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(unreplicated_params) + max_episode_return = episode_return + + # Check if training is finished + if trainer_update_number >= config.system.num_updates: + rng_e, _ = jax.random.split(rng_e) + # Measure absolute metric + evaluator_output = evaluator(params=best_params, rng=rng_e, eval_multiplier=10) + # Log the results of the evaluation. 
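# Illustrative sketch, not part of the diff: flax.jax_utils.unreplicate, used above to drop the
# leading device axis from the replicated params before they are sent back to executor devices.
import jax.numpy as jnp
import flax

replicated = {"w": jnp.ones((2, 3))}  # leading axis = number of devices (toy value of 2)
single_copy = flax.jax_utils.unreplicate(replicated)
print(single_copy["w"].shape)  # (3,): the first replica only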
+ logger.log_evaluator_metrics( + t_env=t_env, + metrics=evaluator_output, + eval_step=trainer_update_number + 1, + absolute_metric=True, + ) + break + + +@hydra.main(config_path="../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") +def hydra_entry_point(cfg: DictConfig) -> None: + """Experiment entry point.""" + + # Run experiment. + run_experiment(cfg) + + print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") + + +if __name__ == "__main__": + hydra_entry_point() \ No newline at end of file From a435a0afa12551685255ac25d1332bb2bf21244f Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 13 Jun 2024 23:51:28 +0100 Subject: [PATCH 016/139] feat: initial learner / training loop --- mava/systems/anakin/ppo/ff_ippo.py | 2 +- mava/systems/sebulba/ppo/ff_ippo.py | 480 +++++++++++++++++----------- mava/systems/sebulba/ppo/test.py | 2 +- mava/utils/checkpointing.py | 2 +- 4 files changed, 298 insertions(+), 188 deletions(-) diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py index 7b45fb45f..44e196535 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -578,7 +578,7 @@ def run_experiment(_config: DictConfig) -> float: return eval_performance -@hydra.main(config_path="../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index c9a2069b2..95e722546 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -14,14 +14,17 @@ import copy import time -from typing import Any, Dict, Tuple - +from typing import Any, Dict, Tuple, List +import threading import chex import flax import hydra import jax import jax.numpy as jnp +import numpy as np import optax +import queue +from collections import deque from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict from jumanji.env import Environment @@ -32,8 +35,8 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition -from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this +from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import ( @@ -47,8 +50,157 @@ from mava.wrappers.episode_metrics import get_final_step_metrics +def rollout( + rng: chex.PRNGKey, + config: DictConfig, + rollout_queue: queue.Queue, + params_queue: queue.Queue, + device_thread_id: int, + apply_fns: Tuple, + logger: MavaLogger, + learner_devices: List): + + #create envs + env = environments.make(config) + + #setup + len_executor_device_ids = len(config.arch.executor_device_ids) + t_env = 0 + start_time = time.time() + + actor_apply_fn, critic_apply_fn = apply_fns + + # Define the util functions: select action function and prepare data to share it with learner. 
+ @jax.jit + def get_action_and_value( + params: FrozenDict, + observation: Observation, + key: chex.PRNGKey, + ) -> Tuple: + """Get action and value.""" + key, subkey = jax.random.split(key) + + policy = actor_apply_fn(params.actor_params, observation) + action, log_prob = policy.sample_and_log_prob(seed=subkey) + + value = critic_apply_fn(params.critic_params, observation).squeeze() + return action, log_prob, value, key + + @jax.jit + def prepare_data(storage: List[PPOTransition]) -> PPOTransition: + """Prepare data to share with learner.""" + return jax.tree_map( # type: ignore + lambda *xs: jnp.split(jnp.stack(xs), len(learner_devices), axis=1), *storage + ) + + + # Define queues to track time + params_queue_get_time: deque = deque(maxlen=10) + rollout_time: deque = deque(maxlen=10) + rollout_queue_put_time: deque = deque(maxlen=10) + + next_obs, next_rewards, next_dones , extra = env.reset() + + # Loop till the learner has finished training + for update in range(1, config.system.num_updates + 2): + # Setup + env_recv_time: float = 0 + inference_time: float = 0 + storage_time: float = 0 + env_send_time: float = 0 + + # Get the latest parameters from the learner + params_queue_get_time_start = time.time() + params = params_queue.get() + params_queue_get_time.append(time.time() - params_queue_get_time_start) + + # Rollout + rollout_time_start = time.time() + storage: List = [] + # Loop over the rollout length + for _ in range(0, config.system.rollout_length): + # Cached for transition + cached_next_obs = next_obs + cached_next_dones = next_dones + + # Increment current timestep + t_env += ( + config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs + ) + + # Get action and value + inference_time_start = time.time() + + ( + action, + log_prob, + value, + rng, + ) = get_action_and_value(params, cached_next_obs, rng) + inference_time += time.time() - inference_time_start + + # Step the environment + env_send_time_start = time.time() + cpu_action = np.array(action) + next_obs, next_reward, next_dones, extra = env.step(cpu_action) + + next_dones = jax.tree_util.tree_map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + (next_dones), + ) + + # Append data to storage + env_send_time += time.time() - env_send_time_start + storage_time_start = time.time() + storage.append( + PPOTransition( + done=cached_next_dones, + action=action, + value=value, + reward=next_reward, + log_prob=log_prob, + obs=cached_next_obs, + info=extra, + ) + ) + storage_time += time.time() - storage_time_start + + rollout_time.append(time.time() - rollout_time_start) + + # Prepare data to share with learner + # todo: investigate the thread --> single learning + partitioned_storage = prepare_data(storage) + sharded_storage = PPOTransition( + *list( # noqa: C417 + map( + lambda x: jax.device_put_sharded(x, devices=learner_devices), # type: ignore + partitioned_storage, + ) + ) + ) + + sharded_next_obs = jax.device_put_sharded( + np.split(next_obs, len(learner_devices)), devices=learner_devices + ) + sharded_next_done = jax.device_put_sharded( + np.split(next_dones, len(learner_devices)), devices=learner_devices + ) + + payload = ( + t_env, + sharded_storage, + sharded_next_obs, + sharded_next_done, + np.mean(params_queue_get_time), + ) + + # Put data in the rollout queue to share it with the learner + rollout_queue_put_time_start = time.time() + rollout_queue.put(payload) + rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) + + def 
get_learner_fn( - env: Environment, apply_fns: Tuple[ActorApply, CriticApply], update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], config: DictConfig, @@ -59,7 +211,7 @@ def get_learner_fn( actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, Tuple]: + def _update_step(learner_state: LearnerState, _: Any, traj_batch : PPOTransition, last_obs: chex.Array, last_done: chex.Array) -> Tuple[LearnerState, Tuple]: """A single update of the network. This function steps the environment and records the trajectory batch for @@ -77,71 +229,32 @@ def _update_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, Tup _ (Any): The current metrics info. """ - def _env_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, PPOTransition]: - """Step the environment.""" - params, opt_states, key, env_state, last_timestep = learner_state - - # SELECT ACTION - key, policy_key = jax.random.split(key) - actor_policy = actor_apply_fn(params.actor_params, last_timestep.observation) - value = critic_apply_fn(params.critic_params, last_timestep.observation) - - action = actor_policy.sample(seed=policy_key) - log_prob = actor_policy.log_prob(action) - - # STEP ENVIRONMENT - env_state, timestep = jax.vmap(env.step, in_axes=(0, 0))(env_state, action) - - # LOG EPISODE METRICS - done = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), - timestep.last(), - ) - info = timestep.extras["episode_metrics"] - - transition = PPOTransition( - done, action, value, timestep.reward, log_prob, last_timestep.observation, info - ) - learner_state = LearnerState(params, opt_states, key, env_state, timestep) - return learner_state, transition - - # STEP ENVIRONMENT FOR ROLLOUT LENGTH - learner_state, traj_batch = jax.lax.scan( - _env_step, learner_state, None, config.system.rollout_length - ) - - # CALCULATE ADVANTAGE - params, opt_states, key, env_state, last_timestep = learner_state - last_val = critic_apply_fn(params.critic_params, last_timestep.observation) - - def _calculate_gae( - traj_batch: PPOTransition, last_val: chex.Array + def _calculate_gae( #todo: lake sure this is appropriate + traj_batch: PPOTransition, last_val: chex.Array, last_done: chex.Array ) -> Tuple[chex.Array, chex.Array]: - """Calculate the GAE.""" - - def _get_advantages(gae_and_next_value: Tuple, transition: PPOTransition) -> Tuple: - """Calculate the GAE for a single transition.""" - gae, next_value = gae_and_next_value - done, value, reward = ( - transition.done, - transition.value, - transition.reward, - ) + def _get_advantages( + carry: Tuple[chex.Array, chex.Array, chex.Array], transition: PPOTransition + ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: + gae, next_value, next_done = carry + done, value, reward = transition.done, transition.value, transition.reward gamma = config.system.gamma - delta = reward + gamma * next_value * (1 - done) - value - gae = delta + gamma * config.system.gae_lambda * (1 - done) * gae - return (gae, value), gae + delta = reward + gamma * next_value * (1 - next_done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae + return (gae, value, done), gae _, advantages = jax.lax.scan( _get_advantages, - (jnp.zeros_like(last_val), last_val), + (jnp.zeros_like(last_val), last_val, last_done), traj_batch, reverse=True, unroll=16, ) return advantages, advantages + 
traj_batch.value - - advantages, targets = _calculate_gae(traj_batch, last_val) + + # CALCULATE ADVANTAGE + params, opt_states, key, _, _ = learner_state + last_val = critic_apply_fn(params.critic_params, last_obs) + advantages, targets = _calculate_gae(traj_batch, last_val, last_done) def _update_epoch(update_state: Tuple, _: Any) -> Tuple: """Update the network for a single epoch.""" @@ -304,11 +417,11 @@ def _critic_loss_fn( ) params, opt_states, traj_batch, advantages, targets, key = update_state - learner_state = LearnerState(params, opt_states, key, env_state, last_timestep) + learner_state = LearnerState(params, opt_states, key) metric = traj_batch.info return learner_state, (metric, loss_info) - def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: + def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_done: chex.Array) -> ExperimentOutput[LearnerState]: """Learner function. This function represents the learner, it updates the network parameters @@ -325,9 +438,11 @@ def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: """ batched_update_step = jax.vmap(_update_step, in_axes=(0, None), axis_name="batch") + + partial_batched_update_step = lambda learner_state, xs : batched_update_step(learner_state, xs, traj_batch , last_obs, last_done) learner_state, (episode_info, loss_info) = jax.lax.scan( - batched_update_step, learner_state, None, config.system.num_updates_per_eval + partial_batched_update_step, learner_state, None, config.system.num_updates_per_eval ) return ExperimentOutput( learner_state=learner_state, @@ -339,16 +454,18 @@ def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: def learner_setup( - env: Environment, keys: chex.Array, config: DictConfig + keys: chex.Array, config: DictConfig, learner_devices: List ) -> Tuple[LearnerFn[LearnerState], Actor, LearnerState]: """Initialise learner_fn, network, optimiser, environment and states.""" # Get available TPU cores. - devices = jax.devices() - learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] n_devices = len(learner_devices) - - # Get number of agents. - config.system.num_agents = env.num_agents + + #create temporory envoirnments. + env = environments.make(config) + # Get number of agents and actions. + action_space = env.single_action_space + config.system.num_agents = len(action_space) + config.system.num_actions = action_space[0].n # PRNG keys. key, actor_net_key, critic_net_key = keys @@ -375,9 +492,10 @@ def learner_setup( optax.adam(critic_lr, eps=1e-5), ) - # Initialise observation with obs of all agents. - obs = env.single_observation_space.sample() - init_x = jax.tree_util.tree_map(lambda x: x[jnp.newaxis, ...], obs) + # Initialise observation: Select only obs for a single agent. + init_obs = np.array([env.single_observation_space.sample()[0]]) + init_action_mask = np.ones((1, config.system.num_actions)) + init_x = Observation(init_obs, init_action_mask) # Initialise actor params and optimiser state. actor_params = actor_network.init(actor_net_key, init_x) @@ -398,20 +516,6 @@ def learner_setup( learn = get_learner_fn(env, apply_fns, update_fns, config) learn = jax.pmap(learn, axis_name="device", devices = learner_devices) - # Initialise environment states and timesteps: across devices and batches. 
- key, *env_keys = jax.random.split( - key, n_devices * config.system.update_batch_size * config.arch.num_envs + 1 - ) - env_states, timesteps = jax.vmap(env.reset, in_axes=(0))( - jnp.stack(env_keys), - ) - reshape_states = lambda x: x.reshape( - (n_devices, config.system.update_batch_size, config.arch.num_envs) + x.shape[1:] - ) - # (devices, update batch size, num_envs, ...) - env_states = jax.tree_map(reshape_states, env_states) - timesteps = jax.tree_map(reshape_states, timesteps) - # Load model from checkpoint if specified. if config.logger.checkpointing.load_model: loaded_checkpoint = Checkpointer( @@ -424,50 +528,63 @@ def learner_setup( params = restored_params # Define params to be replicated across devices and batches. - key, step_keys = jax.random.split(key) opt_states = OptStates(actor_opt_state, critic_opt_state) - replicate_learner = (params, opt_states, step_keys) + replicate_learner = (params, opt_states) # Duplicate learner for update_batch_size. broadcast = lambda x: jnp.broadcast_to(x, (config.system.update_batch_size,) + x.shape) replicate_learner = jax.tree_map(broadcast, replicate_learner) - # Duplicate learner across devices. - replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=jax.devices()) + # Duplicate learner across Learner devices. + replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices) # Initialise learner state. - params, opt_states, step_keys = replicate_learner - init_learner_state = LearnerState(params, opt_states, step_keys, env_states, timesteps) + params, opt_states = replicate_learner + init_learner_state = LearnerState(params, opt_states) + env.close() - return learn, actor_network, init_learner_state + return learn, apply_fns, init_learner_state def run_experiment(_config: DictConfig) -> float: """Runs experiment.""" config = copy.deepcopy(_config) - n_devices = len(jax.devices()) - - # Create the enviroments for train and eval. - env, eval_env = environments.make(config) + devices = jax.devices() # todo: use local devices insted? + learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] # PRNG keys. key, key_e, actor_net_key, critic_net_key = jax.random.split( jax.random.PRNGKey(config.system.seed), num=4 ) + learner_keys = jax.device_put_replicated(key, learner_devices) + + # Sanity check of config + assert ( + config.arch.num_envs % len(config.arch.learner_device_ids) == 0 + ), "The number of environments need to be divisible by the number of learners " + + assert ( + int(config.arch.num_envs / len(config.arch.learner_device_ids)) + * config.arch.n_threads_per_executor + % config.system.num_minibatches + == 0 + ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" + + # Setup learner. - learn, actor_network, learner_state = learner_setup( - env, (key, actor_net_key, critic_net_key), config + learn, apply_fns , learner_state = learner_setup( + learner_keys, config, learner_devices ) # Setup evaluator. # One key per device for evaluation. - eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) + #eval_keys = jax.random.split(key_e, n_devices) # todo: well add the evaluations :) + #evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) # Calculate total timesteps. 
- config = check_total_timesteps(config) + config = check_total_timesteps(config) #todo: update this for sebulba assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." @@ -475,7 +592,8 @@ def run_experiment(_config: DictConfig) -> float: # Calculate number of updates per evaluation. config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation steps_per_rollout = ( - n_devices + config.arch.executor_device_ids + * config.arch.n_threads_per_executor * config.system.num_updates_per_eval * config.system.rollout_length * config.system.update_batch_size @@ -496,91 +614,83 @@ def run_experiment(_config: DictConfig) -> float: model_name=config.logger.system_name, **config.logger.checkpointing.save_args, # Checkpoint args ) - - # Run experiment for a total number of evaluations. - max_episode_return = -jnp.inf + + # Executor setup and launch. + unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) + params_queues: List = [] + rollout_queues: List = [] + for d_idx, d_id in enumerate( # Loop through each executor device + config.arch.executor_device_ids + ): + # Replicate params per executor device + device_params = jax.device_put(unreplicated_params, devices[d_id]) + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + params_queues.append(queue.Queue(maxsize=1)) + rollout_queues.append(queue.Queue(maxsize=1)) + params_queues[-1].put(device_params) + threading.Thread( + target=rollout, + args=( + jax.device_put(key, devices[d_id]), + config, + rollout_queues[-1], + params_queues[-1], + d_idx * config.arch.n_threads_per_executor + thread_id, + apply_fns, + logger, + learner_devices, + ), + ).start() + + # Run experiment for the total number of updates. + rollout_queue_get_time: deque = deque(maxlen=10) + data_transfer_time: deque = deque(maxlen=10) + trainer_update_number = 0 + max_episode_return = jnp.float32(0.0) best_params = None - for eval_step in range(config.arch.num_evaluation): - # Train. - start_time = time.time() - - learner_output = learn(learner_state) - jax.block_until_ready(learner_output) - - # Log the results of the training. - elapsed_time = time.time() - start_time - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time - - # Separately log timesteps, actoring metrics and training metrics. - logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) - if ep_completed: # only log episode metrics if an episode was completed in the rollout. - logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) - - # Prepare for evaluation. - start_time = time.time() - - trained_params = unreplicate_batch_dim(learner_state.params.actor_params) - key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) - eval_keys = jnp.stack(eval_keys) - eval_keys = eval_keys.reshape(n_devices, -1) - - # Evaluate. - evaluator_output = evaluator(trained_params, eval_keys) - jax.block_until_ready(evaluator_output) - - # Log the results of the evaluation. 
- elapsed_time = time.time() - start_time - episode_return = jnp.mean(evaluator_output.episode_metrics["episode_return"]) - - steps_per_eval = int(jnp.sum(evaluator_output.episode_metrics["episode_length"])) - evaluator_output.episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(evaluator_output.episode_metrics, t, eval_step, LogEvent.EVAL) - - if save_checkpoint: - # Save checkpoint of learner state - checkpointer.save( - timestep=steps_per_rollout * (eval_step + 1), - unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state), - episode_return=episode_return, - ) - - if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(trained_params) - max_episode_return = episode_return - - # Update runner state to continue training. - learner_state = learner_output.learner_state - - # Record the performance for the final evaluation run. - eval_performance = float(jnp.mean(evaluator_output.episode_metrics[config.env.eval_metric])) - - # Measure absolute metric. - if config.arch.absolute_metric: - start_time = time.time() - - key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) - eval_keys = jnp.stack(eval_keys) - eval_keys = eval_keys.reshape(n_devices, -1) - - evaluator_output = absolute_metric_evaluator(best_params, eval_keys) - jax.block_until_ready(evaluator_output) - - elapsed_time = time.time() - start_time - steps_per_eval = int(jnp.sum(evaluator_output.episode_metrics["episode_length"])) - t = int(steps_per_rollout * (eval_step + 1)) - evaluator_output.episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(evaluator_output.episode_metrics, t, eval_step, LogEvent.ABSOLUTE) - - # Stop the logger. - logger.stop() + while True: + trainer_update_number += 1 + rollout_queue_get_time_start = time.time() + sharded_storages = [] + sharded_next_obss = [] + sharded_next_dones = [] + sharded_next_action_masks = [] + + # Loop through each executor device + for d_idx, _ in enumerate(config.arch.executor_device_ids): + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + # Get data from rollout queue + ( + t_env, + sharded_storage, + sharded_next_obs, + sharded_next_done, + avg_params_queue_get_time, + ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() + sharded_storages.append(sharded_storage) + sharded_next_obss.append(sharded_next_obs) + sharded_next_dones.append(sharded_next_done) + + rollout_queue_get_time.append(time.time() - rollout_queue_get_time_start) + training_time_start = time.time() + + learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones) + + # Send updated params to executors + unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) + for d_idx, d_id in enumerate(config.arch.executor_device_ids): + device_params = jax.device_put(unreplicated_params, devices[d_id]) + for thread_id in range(config.arch.n_threads_per_executor): + params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( + device_params + ) - return eval_performance + return None#eval_performance -@hydra.main(config_path="../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. 
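The new sebulba ff_ippo.py above replaces Anakin's scan-driven environment stepping with executor threads that talk to the learner through a pair of single-slot queues: each thread blocks on params_queue for the latest parameters, collects a rollout, and pushes its payload onto rollout_queue, while the learner consumes payloads and publishes fresh parameters. A minimal, self-contained sketch of that handshake (toy names and payloads, not Mava's actual API):

    import queue
    import threading

    def actor_thread(params_queue: queue.Queue, rollout_queue: queue.Queue, num_updates: int) -> None:
        """Toy executor: block for the latest params, pretend to act, then ship a payload."""
        for _ in range(num_updates):
            params = params_queue.get()                  # blocks until the learner publishes params
            payload = (params, "fake trajectory batch")  # stand-in for the sharded storage payload
            rollout_queue.put(payload)                   # blocks while the learner is still busy

    def learner_loop(params_queue: queue.Queue, rollout_queue: queue.Queue, num_updates: int) -> None:
        """Toy learner: consume a payload, pretend to update, publish fresh params."""
        params_queue.put("params_v0")
        for update in range(num_updates):
            _params_used, _trajectories = rollout_queue.get()
            if update + 1 < num_updates:
                params_queue.put(f"params_v{update + 1}")  # stand-in for a real pmapped update

    if __name__ == "__main__":
        params_q: queue.Queue = queue.Queue(maxsize=1)
        rollout_q: queue.Queue = queue.Queue(maxsize=1)
        actor = threading.Thread(target=actor_thread, args=(params_q, rollout_q, 3))
        actor.start()
        learner_loop(params_q, rollout_q, 3)
        actor.join()

Before a payload crosses the queue, the rollout thread splits every array along the env axis and places one chunk on each learner device with jax.device_put_sharded, so the pmapped learner sees a leading device axis. A small shape-only illustration (assuming a single default JAX device; the real code reads the device ids from config.arch.learner_device_ids):

    import jax
    import jax.numpy as jnp

    learner_devices = jax.devices()[:1]   # one learner device in this sketch
    rollout_len, num_envs = 4, 8
    dummy_obs = jnp.arange(rollout_len * num_envs).reshape(rollout_len, num_envs)

    # Split the env axis into one chunk per learner device and place each chunk on its device.
    chunks = jnp.split(dummy_obs, len(learner_devices), axis=1)
    sharded_obs = jax.device_put_sharded(chunks, devices=learner_devices)

    # device_put_sharded stacks the chunks along a new leading device axis,
    # which is the axis the pmapped learner maps over.
    print(sharded_obs.shape)  # (1, 4, 8) with a single device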
diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py index b868f69b6..fa3798ce5 100644 --- a/mava/systems/sebulba/ppo/test.py +++ b/mava/systems/sebulba/ppo/test.py @@ -21,7 +21,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/utils/checkpointing.py b/mava/utils/checkpointing.py index 8955f76ce..230c4938d 100644 --- a/mava/utils/checkpointing.py +++ b/mava/utils/checkpointing.py @@ -24,7 +24,7 @@ from jax.tree_util import tree_map from omegaconf import DictConfig, OmegaConf -from mava.systems.ppo.types import HiddenStates, Params +from mava.systems.anakin.ppo.types import HiddenStates, Params from mava.types import MavaState # Keep track of the version of the checkpointer From 7e80d7b5f345f5606684bfbc050fca301b700cff Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 14 Jun 2024 12:46:32 +0100 Subject: [PATCH 017/139] fix: changes the env creation --- mava/systems/sebulba/ppo/ff_ippo.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 95e722546..779891cfb 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -27,7 +27,6 @@ from collections import deque from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict -from jumanji.env import Environment from omegaconf import DictConfig, OmegaConf from optax._src.base import OptState from rich.pretty import pprint @@ -61,7 +60,7 @@ def rollout( learner_devices: List): #create envs - env = environments.make(config) + env = environments.make_gym_env(config.env.scenario.name, config) #setup len_executor_device_ids = len(config.arch.executor_device_ids) @@ -461,7 +460,7 @@ def learner_setup( n_devices = len(learner_devices) #create temporory envoirnments. - env = environments.make(config) + env = environments.make_gym_env(config.env.scenario.name, config) # Get number of agents and actions. action_space = env.single_action_space config.system.num_agents = len(action_space) From b961336e21e75aa41821047e935a6bb4aa8eb292 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 15 Jun 2024 21:36:36 +0100 Subject: [PATCH 018/139] fix: fixed function calls --- mava/configs/arch/sebulba.yaml | 2 +- mava/systems/sebulba/ppo/ff_ippo.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 98cd4d96d..ac8c4eb75 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,6 +1,6 @@ # --- Sebulba config --- arch_name: "sebulba" -num_envs: 16 # number of envs per thread +num_envs: 2 # number of envs per thread # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. 
If True the policy will select diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 779891cfb..671e6f65c 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -60,7 +60,7 @@ def rollout( learner_devices: List): #create envs - env = environments.make_gym_env(config.env.scenario.name, config) + env = environments.make_gym_env(config) #setup len_executor_device_ids = len(config.arch.executor_device_ids) @@ -460,19 +460,19 @@ def learner_setup( n_devices = len(learner_devices) #create temporory envoirnments. - env = environments.make_gym_env(config.env.scenario.name, config) + env = environments.make_gym_env(config) # Get number of agents and actions. action_space = env.single_action_space config.system.num_agents = len(action_space) config.system.num_actions = action_space[0].n # PRNG keys. - key, actor_net_key, critic_net_key = keys + actor_net_key, critic_net_key = keys # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=env.action_dim + config.network.action_head, action_dim=config.system.num_actions ) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) @@ -494,7 +494,7 @@ def learner_setup( # Initialise observation: Select only obs for a single agent. init_obs = np.array([env.single_observation_space.sample()[0]]) init_action_mask = np.ones((1, config.system.num_actions)) - init_x = Observation(init_obs, init_action_mask) + init_x = Observation(init_obs, init_action_mask, None) # Initialise actor params and optimiser state. actor_params = actor_network.init(actor_net_key, init_x) @@ -512,7 +512,7 @@ def learner_setup( update_fns = (actor_optim.update, critic_optim.update) # Get batched iterated update and replicate it to pmap it over cores. - learn = get_learner_fn(env, apply_fns, update_fns, config) + learn = get_learner_fn(apply_fns, update_fns, config) learn = jax.pmap(learn, axis_name="device", devices = learner_devices) # Load model from checkpoint if specified. @@ -539,7 +539,7 @@ def learner_setup( # Initialise learner state. params, opt_states = replicate_learner - init_learner_state = LearnerState(params, opt_states) + init_learner_state = LearnerState(params, opt_states, None, None, None) env.close() return learn, apply_fns, init_learner_state @@ -574,7 +574,7 @@ def run_experiment(_config: DictConfig) -> float: # Setup learner. learn, apply_fns , learner_state = learner_setup( - learner_keys, config, learner_devices + (actor_net_key, critic_net_key), config, learner_devices ) # Setup evaluator. @@ -591,7 +591,7 @@ def run_experiment(_config: DictConfig) -> float: # Calculate number of updates per evaluation. 
config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation steps_per_rollout = ( - config.arch.executor_device_ids + len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor * config.system.num_updates_per_eval * config.system.rollout_length From 502730d4d82fb62a3d085a30d13f17c3978f6768 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 22 Jun 2024 12:03:38 +0100 Subject: [PATCH 019/139] fix: fixed the training and added training logger --- mava/configs/arch/sebulba.yaml | 4 +- mava/systems/anakin/ppo/ff_ippo.py | 4 +- mava/systems/anakin/ppo/ff_mappo.py | 4 +- mava/systems/anakin/ppo/rec_ippo.py | 4 +- mava/systems/anakin/ppo/rec_mappo.py | 4 +- mava/systems/anakin/q_learning/rec_iql.py | 4 +- mava/systems/anakin/sac/ff_isac.py | 4 +- mava/systems/anakin/sac/ff_masac.py | 4 +- mava/systems/sebulba/ppo/ff_ippo.py | 162 +++++++++++----------- mava/systems/sebulba/ppo/orig.py | 5 +- mava/systems/sebulba/ppo/test.py | 23 ++- mava/utils/total_timestep_checker.py | 32 ++++- 12 files changed, 145 insertions(+), 109 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index ac8c4eb75..cd47dca13 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,6 +1,6 @@ # --- Sebulba config --- arch_name: "sebulba" -num_envs: 2 # number of envs per thread +num_envs: 4 # number of envs per thread # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select @@ -12,7 +12,7 @@ absolute_metric: True # Whether the absolute metric should be computed. For more # on the absolute metric please see: https://arxiv.org/abs/2209.10485 # --- Sebulba devices config --- -n_threads_per_executor: 1 # num of different threads/env batches per actor +n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py index 44e196535..98920428e 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -42,7 +42,7 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -465,7 +465,7 @@ def run_experiment(_config: DictConfig) -> float: evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) # Calculate total timesteps. - config = check_total_timesteps(config) + config = anakin_check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." 
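One easy-to-miss detail in the steps_per_rollout fix from patch 018 just above: the old expression multiplied by the executor_device_ids list itself, which repeats the list instead of counting devices. A two-line illustration of the difference (small illustrative values, e.g. one executor device and two threads per executor):

    executor_device_ids = [0]        # list of executor device ids, as in sebulba.yaml
    n_threads_per_executor = 2

    print(executor_device_ids * n_threads_per_executor)       # [0, 0] -- list repetition, not a count
    print(len(executor_device_ids) * n_threads_per_executor)  # 2 -- what the step accounting needs

With the length in place, the remaining factors (updates per evaluation, rollout length and envs per thread) turn that device-and-thread count into the number of environment steps between two logging or evaluation points.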
diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py index 519fa4f39..dda1ef14b 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/anakin/ppo/ff_mappo.py @@ -41,7 +41,7 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -462,7 +462,7 @@ def run_experiment(_config: DictConfig) -> float: evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) # Calculate total timesteps. - config = check_total_timesteps(config) + config = anakin_check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." diff --git a/mava/systems/anakin/ppo/rec_ippo.py b/mava/systems/anakin/ppo/rec_ippo.py index e70a59f07..5aff93ee6 100644 --- a/mava/systems/anakin/ppo/rec_ippo.py +++ b/mava/systems/anakin/ppo/rec_ippo.py @@ -45,7 +45,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -622,7 +622,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 ) # Calculate total timesteps. - config = check_total_timesteps(config) + config = anakin_check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py index 14284cedb..7efbad9d2 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/anakin/ppo/rec_mappo.py @@ -45,7 +45,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -614,7 +614,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 ) # Calculate total timesteps. - config = check_total_timesteps(config) + config = anakin_check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." 
diff --git a/mava/systems/anakin/q_learning/rec_iql.py b/mava/systems/anakin/q_learning/rec_iql.py index 6be8e61a4..60fd98d5c 100644 --- a/mava/systems/anakin/q_learning/rec_iql.py +++ b/mava/systems/anakin/q_learning/rec_iql.py @@ -52,7 +52,7 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.wrappers import episode_metrics @@ -528,7 +528,7 @@ def update_step( def run_experiment(cfg: DictConfig) -> float: # Add runtime variables to config cfg.arch.n_devices = len(jax.devices()) - cfg = check_total_timesteps(cfg) + cfg = anakin_check_total_timesteps(cfg) # Number of env steps before evaluating/logging. steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) diff --git a/mava/systems/anakin/sac/ff_isac.py b/mava/systems/anakin/sac/ff_isac.py index 2c33028d1..7e4e20335 100644 --- a/mava/systems/anakin/sac/ff_isac.py +++ b/mava/systems/anakin/sac/ff_isac.py @@ -51,7 +51,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.wrappers import episode_metrics @@ -483,7 +483,7 @@ def update_step(carry: LearnerState, _: Any) -> Tuple[LearnerState, Tuple[Metric def run_experiment(cfg: DictConfig) -> float: # Add runtime variables to config cfg.arch.n_devices = len(jax.devices()) - cfg = check_total_timesteps(cfg) + cfg = anakin_check_total_timesteps(cfg) # Number of env steps before evaluating/logging. steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) diff --git a/mava/systems/anakin/sac/ff_masac.py b/mava/systems/anakin/sac/ff_masac.py index 4401906ee..d5fb9172d 100644 --- a/mava/systems/anakin/sac/ff_masac.py +++ b/mava/systems/anakin/sac/ff_masac.py @@ -52,7 +52,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.wrappers import episode_metrics @@ -501,7 +501,7 @@ def update_step(carry: LearnerState, _: Any) -> Tuple[LearnerState, Tuple[Metric def run_experiment(cfg: DictConfig) -> float: # Add runtime variables to config cfg.arch.n_devices = len(jax.devices()) - cfg = check_total_timesteps(cfg) + cfg = anakin_check_total_timesteps(cfg) # Number of env steps before evaluating/logging. 
steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 671e6f65c..f5a97b807 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -20,6 +20,7 @@ import flax import hydra import jax +import jax.debug import jax.numpy as jnp import numpy as np import optax @@ -34,8 +35,8 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this -from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation +from mava.systems.sebulba.ppo.types import LearnerState, OptStates, Params, PPOTransition, Observation #todo: change this Observation to use the origial one +from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import ( @@ -44,26 +45,28 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import sebulba_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics def rollout( - rng: chex.PRNGKey, + key: chex.PRNGKey, config: DictConfig, rollout_queue: queue.Queue, params_queue: queue.Queue, device_thread_id: int, apply_fns: Tuple, logger: MavaLogger, - learner_devices: List): + learner_devices: List, + actor_device_id : int): #create envs env = environments.make_gym_env(config) #setup len_executor_device_ids = len(config.arch.executor_device_ids) + current_actor_device = jax.devices()[actor_device_id] t_env = 0 start_time = time.time() @@ -78,9 +81,10 @@ def get_action_and_value( ) -> Tuple: """Get action and value.""" key, subkey = jax.random.split(key) - - policy = actor_apply_fn(params.actor_params, observation) - action, log_prob = policy.sample_and_log_prob(seed=subkey) + + actor_policy = actor_apply_fn(params.actor_params, observation) + action = actor_policy.sample(seed=subkey) + log_prob = actor_policy.log_prob(action) value = critic_apply_fn(params.critic_params, observation).squeeze() return action, log_prob, value, key @@ -89,7 +93,7 @@ def get_action_and_value( def prepare_data(storage: List[PPOTransition]) -> PPOTransition: """Prepare data to share with learner.""" return jax.tree_map( # type: ignore - lambda *xs: jnp.split(jnp.stack(xs), len(learner_devices), axis=1), *storage + lambda *xs: jnp.stack(xs), *storage ) @@ -98,7 +102,10 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: rollout_time: deque = deque(maxlen=10) rollout_queue_put_time: deque = deque(maxlen=10) - next_obs, next_rewards, next_dones , extra = env.reset() + next_obs , info = env.reset() + next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) + + move_to_device = lambda x : jax.device_put(x, device = current_actor_device) # Loop till the learner has finished training for update in range(1, config.system.num_updates + 2): @@ -113,15 +120,16 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: params = params_queue.get() params_queue_get_time.append(time.time() - params_queue_get_time_start) - # Rollout + # Rollout 
rollout_time_start = time.time() storage: List = [] # Loop over the rollout length for _ in range(0, config.system.rollout_length): # Cached for transition - cached_next_obs = next_obs - cached_next_dones = next_dones - + cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) + cached_next_dones = move_to_device(next_dones) + cashed_action_mask = move_to_device(jnp.stack([*info["actions_mask"]], axis = 0) ) #unpack the numpy object, find a more pythonic way? + # Increment current timestep t_env += ( config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs @@ -129,24 +137,20 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: # Get action and value inference_time_start = time.time() - + # ( action, log_prob, value, - rng, - ) = get_action_and_value(params, cached_next_obs, rng) + key, + ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) inference_time += time.time() - inference_time_start # Step the environment env_send_time_start = time.time() - cpu_action = np.array(action) - next_obs, next_reward, next_dones, extra = env.step(cpu_action) - - next_dones = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), - (next_dones), - ) + cpu_action = jax.device_get(action) + next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) #num_env, num_agents --> num_agents, num_env + next_dones = np.logical_or(terminated, truncated) # Append data to storage env_send_time += time.time() - env_send_time_start @@ -158,38 +162,32 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: value=value, reward=next_reward, log_prob=log_prob, - obs=cached_next_obs, - info=extra, - ) + obs=Observation(cached_next_obs, cashed_action_mask), + info={"win_rate" : info.get("win_rate")}, + )#todo: use a threadsafe alt https://github.com/instadeepai/CityLearn/blob/27e69f8ebdf1789c55ffab5c326bfaa50733a5e7/power_systems/sax_sebulba.py#L39 ) storage_time += time.time() - storage_time_start rollout_time.append(time.time() - rollout_time_start) # Prepare data to share with learner - # todo: investigate the thread --> single learning + # todo: investigate te thread --> single learning partitioned_storage = prepare_data(storage) - sharded_storage = PPOTransition( - *list( # noqa: C417 - map( - lambda x: jax.device_put_sharded(x, devices=learner_devices), # type: ignore - partitioned_storage, - ) - ) - ) + #sorage has shape rollout_len, num_agents, num_envs, .... while the other vectors have num_agents, num_envs, ... 
-> their split axis is diffrent + shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) + + sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , partitioned_storage) - sharded_next_obs = jax.device_put_sharded( - np.split(next_obs, len(learner_devices)), devices=learner_devices - ) - sharded_next_done = jax.device_put_sharded( - np.split(next_dones, len(learner_devices)), devices=learner_devices - ) + sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) + sharded_next_action_mask = shard_split_payload(jnp.stack([*info["actions_mask"]], axis = 0), 0) + sharded_next_done = shard_split_payload(next_dones, 0) payload = ( t_env, sharded_storage, sharded_next_obs, sharded_next_done, + sharded_next_action_mask, np.mean(params_queue_get_time), ) @@ -210,7 +208,7 @@ def get_learner_fn( actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, _: Any, traj_batch : PPOTransition, last_obs: chex.Array, last_done: chex.Array) -> Tuple[LearnerState, Tuple]: + def _update_step(learner_state: LearnerState, _: Any, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: """A single update of the network. This function steps the environment and records the trajectory batch for @@ -252,8 +250,8 @@ def _get_advantages( # CALCULATE ADVANTAGE params, opt_states, key, _, _ = learner_state - last_val = critic_apply_fn(params.critic_params, last_obs) - advantages, targets = _calculate_gae(traj_batch, last_val, last_done) + last_val = critic_apply_fn(params.critic_params, Observation(last_obs, last_action_mask)) + advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) def _update_epoch(update_state: Tuple, _: Any) -> Tuple: """Update the network for a single epoch.""" @@ -338,18 +336,11 @@ def _critic_loss_fn( # Compute the parallel mean (pmean) over the batch. # This calculation is inspired by the Anakin architecture demo notebook. # available at https://tinyurl.com/26tdzs5x - # This pmean could be a regular mean as the batch axis is on the same device. - actor_grads, actor_loss_info = jax.lax.pmean( - (actor_grads, actor_loss_info), axis_name="batch" - ) # pmean over devices. actor_grads, actor_loss_info = jax.lax.pmean( (actor_grads, actor_loss_info), axis_name="device" ) - critic_grads, critic_loss_info = jax.lax.pmean( - (critic_grads, critic_loss_info), axis_name="batch" - ) # pmean over devices. 
critic_grads, critic_loss_info = jax.lax.pmean( (critic_grads, critic_loss_info), axis_name="device" @@ -370,7 +361,6 @@ def _critic_loss_fn( # PACK NEW PARAMS AND OPTIMISER STATE new_params = Params(actor_new_params, critic_new_params) new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) - # PACK LOSS INFO total_loss = actor_loss_info[0] + critic_loss_info[0] value_loss = critic_loss_info[1] @@ -386,9 +376,8 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) - # SHUFFLE MINIBATCHES - batch_size = config.system.rollout_length * config.arch.num_envs + batch_size = config.system.rollout_length * config.arch.num_envs * len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor permutation = jax.random.permutation(shuffle_key, batch_size) batch = (traj_batch, advantages, targets) batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) @@ -399,7 +388,6 @@ def _critic_loss_fn( lambda x: jnp.reshape(x, [config.system.num_minibatches, -1] + list(x.shape[1:])), shuffled_batch, ) - # UPDATE MINIBATCHES (params, opt_states, entropy_key), loss_info = jax.lax.scan( _update_minibatch, (params, opt_states, entropy_key), minibatches @@ -409,18 +397,17 @@ def _critic_loss_fn( return update_state, loss_info update_state = (params, opt_states, traj_batch, advantages, targets, key) - # UPDATE EPOCHS update_state, loss_info = jax.lax.scan( _update_epoch, update_state, None, config.system.ppo_epochs ) params, opt_states, traj_batch, advantages, targets, key = update_state - learner_state = LearnerState(params, opt_states, key) - metric = traj_batch.info + learner_state = LearnerState(params, opt_states, key, None, None) + metric = traj_batch.info #todo: metrci calcualtions return learner_state, (metric, loss_info) - def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_done: chex.Array) -> ExperimentOutput[LearnerState]: + def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: """Learner function. This function represents the learner, it updates the network parameters @@ -435,14 +422,13 @@ def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs - env_state (LogEnvState): The environment state. - timesteps (TimeStep): The initial timestep in the initial trajectory. """ - - batched_update_step = jax.vmap(_update_step, in_axes=(0, None), axis_name="batch") + # Broadcast static parameters for scan + partial_update_step = lambda learner_state, xs : _update_step(learner_state, xs, traj_batch , last_obs, last_action_mask, last_dones) - partial_batched_update_step = lambda learner_state, xs : batched_update_step(learner_state, xs, traj_batch , last_obs, last_done) - learner_state, (episode_info, loss_info) = jax.lax.scan( - partial_batched_update_step, learner_state, None, config.system.num_updates_per_eval + partial_update_step, learner_state, None, config.system.num_updates_per_eval ) + return ExperimentOutput( learner_state=learner_state, episode_metrics=episode_info, @@ -467,7 +453,7 @@ def learner_setup( config.system.num_actions = action_space[0].n # PRNG keys. - actor_net_key, critic_net_key = keys + key, actor_net_key, critic_net_key = keys # Define network and optimiser. 
actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) @@ -492,9 +478,9 @@ def learner_setup( ) # Initialise observation: Select only obs for a single agent. - init_obs = np.array([env.single_observation_space.sample()[0]]) - init_action_mask = np.ones((1, config.system.num_actions)) - init_x = Observation(init_obs, init_action_mask, None) + init_obs = jnp.array([env.single_observation_space.sample()]) + init_action_mask = jnp.ones((config.system.num_agents, config.system.num_actions)) + init_x = Observation(init_obs, init_action_mask) # Initialise actor params and optimiser state. actor_params = actor_network.init(actor_net_key, init_x) @@ -527,19 +513,16 @@ def learner_setup( params = restored_params # Define params to be replicated across devices and batches. + key, step_keys = jax.random.split(key) opt_states = OptStates(actor_opt_state, critic_opt_state) - replicate_learner = (params, opt_states) - - # Duplicate learner for update_batch_size. - broadcast = lambda x: jnp.broadcast_to(x, (config.system.update_batch_size,) + x.shape) - replicate_learner = jax.tree_map(broadcast, replicate_learner) + replicate_learner = (params, opt_states, step_keys) # Duplicate learner across Learner devices. replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices) # Initialise learner state. - params, opt_states = replicate_learner - init_learner_state = LearnerState(params, opt_states, None, None, None) + params, opt_states, step_keys = replicate_learner + init_learner_state = LearnerState(params, opt_states, step_keys, None, None) env.close() return learn, apply_fns, init_learner_state @@ -562,7 +545,7 @@ def run_experiment(_config: DictConfig) -> float: # Sanity check of config assert ( config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments need to be divisible by the number of learners " + ), "The number of environments must to be divisible by the number of learners " assert ( int(config.arch.num_envs / len(config.arch.learner_device_ids)) @@ -574,7 +557,7 @@ def run_experiment(_config: DictConfig) -> float: # Setup learner. learn, apply_fns , learner_state = learner_setup( - (actor_net_key, critic_net_key), config, learner_devices + (key ,actor_net_key, critic_net_key), config, learner_devices ) # Setup evaluator. @@ -583,7 +566,7 @@ def run_experiment(_config: DictConfig) -> float: #evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) # Calculate total timesteps. - config = check_total_timesteps(config) #todo: update this for sebulba + config = sebulba_check_total_timesteps(config) #todo: update this for sebulba assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." 
@@ -595,7 +578,6 @@ def run_experiment(_config: DictConfig) -> float: * config.arch.n_threads_per_executor * config.system.num_updates_per_eval * config.system.rollout_length - * config.system.update_batch_size * config.arch.num_envs ) @@ -639,6 +621,7 @@ def run_experiment(_config: DictConfig) -> float: apply_fns, logger, learner_devices, + d_id, ), ).start() @@ -648,7 +631,7 @@ def run_experiment(_config: DictConfig) -> float: trainer_update_number = 0 max_episode_return = jnp.float32(0.0) best_params = None - while True: + for eval_step in range(config.arch.num_evaluation): #todo : place holder trainer_update_number += 1 rollout_queue_get_time_start = time.time() sharded_storages = [] @@ -666,25 +649,36 @@ def run_experiment(_config: DictConfig) -> float: sharded_storage, sharded_next_obs, sharded_next_done, + sharded_next_action_mask, avg_params_queue_get_time, ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() sharded_storages.append(sharded_storage) sharded_next_obss.append(sharded_next_obs) sharded_next_dones.append(sharded_next_done) - + sharded_next_action_masks.append(sharded_next_action_mask) rollout_queue_get_time.append(time.time() - rollout_queue_get_time_start) training_time_start = time.time() - learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones) + #Concatinate the returned trajectories on the n_env axis + sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) + sharded_next_obss = jnp.concatenate(sharded_next_obss, axis = 1) + sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) + sharded_next_action_masks = jnp.concatenate(sharded_next_action_masks, axis = 1) + + learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_action_masks, sharded_next_dones) # Send updated params to executors - unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) + unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) for d_idx, d_id in enumerate(config.arch.executor_device_ids): device_params = jax.device_put(unreplicated_params, devices[d_id]) for thread_id in range(config.arch.n_threads_per_executor): params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( device_params ) + + t = int(steps_per_rollout * (eval_step + 1)) + logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + return None#eval_performance diff --git a/mava/systems/sebulba/ppo/orig.py b/mava/systems/sebulba/ppo/orig.py index 85b679305..dde0add30 100644 --- a/mava/systems/sebulba/ppo/orig.py +++ b/mava/systems/sebulba/ppo/orig.py @@ -43,7 +43,6 @@ ActorApply, CriticApply, LearnerState, - Observation, OptStates, Params, ) @@ -189,8 +188,8 @@ def prepare_data(storage: List[Transition]) -> Transition: ) storage_time += time.time() - storage_time_start - # Update episode info - episode_returns[env_id] += np.mean(next_reward) + # Update episode info ---------------------------------------------------------------------------------------------------------- this is kinda cringe? 
+ episode_returns[env_id] += np.mean(next_reward, axis = 1) returned_episode_returns[env_id] = np.where( next_done, episode_returns[env_id], diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py index fa3798ce5..adc15dcc7 100644 --- a/mava/systems/sebulba/ppo/test.py +++ b/mava/systems/sebulba/ppo/test.py @@ -31,20 +31,33 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.total_timestep_checker import anakin_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics - - +from flax import linen as nn +import gym +from mava.wrappers import GymRwareWrapper @hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. OmegaConf.set_struct(cfg, False) + + base = gym.make(cfg.env.scenario) + base = GymRwareWrapper(base, cfg.env.use_individual_rewards, False, True) + base.reset() + ree = base.step([0,0]) + print(ree) env = environments.make_gym_env(cfg) a = env.reset() print(a) + b = env.step([[0,0], [0,0], [0,0], [0,0]]) + #print(b) + #r = 1+1 + # Create a sample input + #env = gym.make(cfg.env.scenario) + #env.reset() + #a = env.step(jnp.ones((4))) -if __name__ == "__main__": - hydra_entry_point() \ No newline at end of file +hydra_entry_point() \ No newline at end of file diff --git a/mava/utils/total_timestep_checker.py b/mava/utils/total_timestep_checker.py index c2cda8320..fd90b7436 100644 --- a/mava/utils/total_timestep_checker.py +++ b/mava/utils/total_timestep_checker.py @@ -18,7 +18,7 @@ from omegaconf import DictConfig -def check_total_timesteps(config: DictConfig) -> DictConfig: +def anakin_check_total_timesteps(config: DictConfig) -> DictConfig: """Check if total_timesteps is set, if not, set it based on the other parameters""" n_devices = len(jax.devices()) @@ -47,3 +47,33 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: + f"{Style.RESET_ALL}" ) return config + + +def sebulba_check_total_timesteps(config: DictConfig) -> DictConfig: + """Check if total_timesteps is set, if not, set it based on the other parameters""" + + if config.system.total_timesteps is None: + config.system.num_updates = int(config.system.num_updates) + config.system.total_timesteps = int( + len(config.arch.executor_device_ids) + * config.arch.n_threads_per_executor + * config.system.num_updates + * config.system.rollout_length + * config.arch.num_envs + ) + else: + config.system.total_timesteps = int(config.system.total_timesteps) + config.system.num_updates = int( + config.system.total_timesteps + // config.system.rollout_length + // config.arch.num_envs + // config.arch.n_threads_per_executor + // len(config.arch.executor_device_ids) + ) + print( + f"{Fore.RED}{Style.BRIGHT} Changing the number of updates " + + f"to {config.system.num_updates}: If you want to train" + + " for a specific number of updates, please set total_timesteps to None!" 
+ + f"{Style.RESET_ALL}" + ) + return config \ No newline at end of file From 1985729cab347716153d3f5e00713b08eeb96f1b Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 22 Jun 2024 12:37:46 +0100 Subject: [PATCH 020/139] fix: changed the anakin ppo type import --- mava/systems/anakin/ppo/ff_ippo.py | 2 +- mava/systems/anakin/ppo/ff_mappo.py | 2 +- mava/systems/anakin/ppo/rec_ippo.py | 2 +- mava/systems/anakin/ppo/rec_mappo.py | 2 +- mava/systems/sebulba/ppo/ff_ippo.py | 16 ++++++++++++++-- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py index 98920428e..d8cd0e9b4 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -32,7 +32,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py index dda1ef14b..a4ddfdaa5 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/anakin/ppo/ff_mappo.py @@ -31,7 +31,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/anakin/ppo/rec_ippo.py b/mava/systems/anakin/ppo/rec_ippo.py index 5aff93ee6..512a09301 100644 --- a/mava/systems/anakin/ppo/rec_ippo.py +++ b/mava/systems/anakin/ppo/rec_ippo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.ppo.types import ( +from mava.systems.anakin.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py index 7efbad9d2..529a0505b 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/anakin/ppo/rec_mappo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.ppo.types import ( +from mava.systems.anakin.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index f5a97b807..0ce93cda0 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -667,6 +667,12 @@ def run_experiment(_config: DictConfig) -> float: learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_action_masks, sharded_next_dones) + # Log the results of the training. 
+ elapsed_time = time.time() - rollout_queue_get_time_start + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + # Send updated params to executors unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) for d_idx, d_id in enumerate(config.arch.executor_device_ids): @@ -675,8 +681,11 @@ def run_experiment(_config: DictConfig) -> float: params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( device_params ) - - t = int(steps_per_rollout * (eval_step + 1)) + + # Separately log timesteps, actoring metrics and training metrics. + logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) + if ep_completed: # only log episode metrics if an episode was completed in the rollout. + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) @@ -697,3 +706,6 @@ def hydra_entry_point(cfg: DictConfig) -> float: if __name__ == "__main__": hydra_entry_point() + +#learner_output.episode_metrics.keys() +#dict_keys(['episode_length', 'episode_return']) \ No newline at end of file From 89ed2466e8a3bbaff26eb60145a6dbb85e5e929c Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 25 Jun 2024 15:43:31 +0100 Subject: [PATCH 021/139] feat: fulll sebulba functional --- .../ff_ippo_store_experience.py | 4 +- mava/configs/arch/sebulba.yaml | 2 +- mava/configs/env/gym.yaml | 2 +- mava/configs/system/ppo/ff_ippo.yaml | 4 +- mava/evaluator.py | 129 +++++++++++- mava/systems/anakin/ppo/ff_ippo.py | 4 +- mava/systems/anakin/ppo/ff_mappo.py | 4 +- mava/systems/anakin/ppo/rec_ippo.py | 4 +- mava/systems/anakin/ppo/rec_mappo.py | 4 +- mava/systems/anakin/q_learning/rec_iql.py | 4 +- mava/systems/anakin/sac/ff_isac.py | 4 +- mava/systems/anakin/sac/ff_masac.py | 4 +- mava/systems/sebulba/ppo/ff_ippo.py | 168 ++++++++------- mava/systems/sebulba/ppo/test.py | 46 +++-- mava/utils/logger.py | 2 +- mava/utils/make_env.py | 10 +- mava/wrappers/__init__.py | 2 +- mava/wrappers/episode_metrics.py | 2 +- mava/wrappers/gym.py | 193 +++++++++++++----- 19 files changed, 424 insertions(+), 168 deletions(-) diff --git a/mava/advanced_usage/ff_ippo_store_experience.py b/mava/advanced_usage/ff_ippo_store_experience.py index 4bd94040c..4236bc641 100644 --- a/mava/advanced_usage/ff_ippo_store_experience.py +++ b/mava/advanced_usage/ff_ippo_store_experience.py @@ -30,7 +30,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition @@ -469,7 +469,7 @@ def run_experiment(_config: DictConfig) -> None: # noqa: CCR001 # Setup evaluator. 
eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network, config) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor_network, config) config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation steps_per_rollout = ( diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index cd47dca13..02ae56bb3 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,6 +1,6 @@ # --- Sebulba config --- arch_name: "sebulba" -num_envs: 4 # number of envs per thread +num_envs: 64 # number of envs per thread # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml index ad8d16b9a..44c9c624a 100644 --- a/mava/configs/env/gym.yaml +++ b/mava/configs/env/gym.yaml @@ -10,7 +10,7 @@ eval_metric: episode_return # Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. # This should not be changed. -implicit_agent_id: False +implicit_agent_id: True # Whether or not to log the winrate of this environment. This should not be changed as not all # environments have a winrate metric. log_win_rate: False diff --git a/mava/configs/system/ppo/ff_ippo.yaml b/mava/configs/system/ppo/ff_ippo.yaml index 9efb0611a..b8d0573b4 100644 --- a/mava/configs/system/ppo/ff_ippo.yaml +++ b/mava/configs/system/ppo/ff_ippo.yaml @@ -1,6 +1,6 @@ # --- Defaults FF-IPPO --- -total_timesteps: ~ # Set the total environment steps. +total_timesteps: 20_000_000 # Set the total environment steps. # If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value. num_updates: 1000 # Number of updates seed: 42 @@ -14,7 +14,7 @@ critic_lr: 2.5e-4 # Learning rate for critic network update_batch_size: 2 # Number of vectorised gradient updates per device. rollout_length: 128 # Number of environment steps per vectorised environment. ppo_epochs: 4 # Number of ppo epochs per training data batch. -num_minibatches: 2 # Number of minibatches per ppo epoch. +num_minibatches: 1 # Number of minibatches per ppo epoch. gamma: 0.99 # Discounting factor. gae_lambda: 0.95 # Lambda value for GAE computation. clip_eps: 0.2 # Clipping value for PPO updates and value function. 
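The Sebulba config values above combine multiplicatively into the number of environment steps gathered per learner update, and `steps_per_rollout` as used later in the system multiplies that again by `num_updates_per_eval`. A minimal sketch of the arithmetic (the concrete numbers below are illustrative, not taken from the patch):

    num_envs = 64                  # per rollout thread (illustrative value)
    rollout_length = 128
    n_threads_per_executor = 1
    n_executor_devices = 1         # len(executor_device_ids)
    num_updates = 1000
    num_evaluation = 200

    steps_per_learner_update = (
        n_executor_devices * n_threads_per_executor * num_envs * rollout_length
    )
    num_updates_per_eval = num_updates // num_evaluation
    steps_per_rollout = steps_per_learner_update * num_updates_per_eval  # per eval window
    print(steps_per_learner_update, num_updates_per_eval, steps_per_rollout)  # 8192 5 40960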
diff --git a/mava/evaluator.py b/mava/evaluator.py index 201544338..066890ed9 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -31,8 +31,10 @@ RNNEvalState, ) +from mava.systems.sebulba.ppo.types import Observation +import numpy as np -def get_ff_evaluator_fn( +def get_anakin_ff_evaluator_fn( env: Environment, apply_fn: ActorApply, config: DictConfig, @@ -282,7 +284,7 @@ def evaluator_fn( return evaluator_fn -def make_eval_fns( +def make_anakin_eval_fns( eval_env: Environment, network_apply_fn: Union[ActorApply, RecActorApply], config: DictConfig, @@ -327,10 +329,10 @@ def make_eval_fns( 10, ) else: - evaluator = get_ff_evaluator_fn( + evaluator = get_anakin_ff_evaluator_fn( eval_env, network_apply_fn, config, log_win_rate # type: ignore ) - absolute_metric_evaluator = get_ff_evaluator_fn( + absolute_metric_evaluator = get_anakin_ff_evaluator_fn( eval_env, network_apply_fn, config, log_win_rate, 10 # type: ignore ) @@ -338,3 +340,122 @@ def make_eval_fns( absolute_metric_evaluator = jax.pmap(absolute_metric_evaluator, axis_name="device") return evaluator, absolute_metric_evaluator + + +def get_sebulba_ff_evaluator_fn( + env: Environment, + apply_fn: ActorApply, + config: DictConfig, + log_win_rate: bool = False, +) -> EvalFn: + """Get the evaluator function for feedforward networks. + + Args: + env (Environment): An evironment instance for evaluation. + apply_fn (callable): Network forward pass method. + config (dict): Experiment configuration. + """ + @jax.jit + def get_action( #todo explicetly put these on the learner? they should already be there + params: FrozenDict, + observation: Observation, + key: chex.PRNGKey, + ) -> Tuple: + """Get action.""" + + pi = apply_fn(params, observation) + + if config.arch.evaluation_greedy: + action = pi.mode() + else: + action = pi.sample(seed=key) + + return action + def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: + + dones = np.zeros(env.num_envs) # todo: jnp or np? + + obs, info = env.reset() + eval_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) + + while not dones.all(): + + key, policy_key = jax.random.split(key) + + obs = jax.device_put(jnp.stack(obs, axis = 1)) + action_mask = jax.device_put(jnp.stack([*info["actions_mask"]], axis = 0)) + + actions = get_action(params, Observation(obs, action_mask), policy_key) + cpu_action = jax.device_get(actions) + + obs, reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) + + next_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) + + next_dones = next_metrics["is_terminal_step"] + + update_metric = lambda old_metric, new_metric : np.where(np.logical_and(next_dones, dones == False), new_metric, old_metric) + eval_metrics = jax.tree_map(update_metric, eval_metrics, next_metrics) + + dones = np.logical_or(dones, next_dones) + eval_metrics.pop("is_terminal_step") + + return eval_metrics + + return eval_episodes + + +def make_sebulba_eval_fns( + eval_env_fn: callable, + network_apply_fn: Union[ActorApply, RecActorApply], + config: DictConfig, + use_recurrent_net: bool = False, + scanned_rnn: Optional[nn.Module] = None, +) -> Tuple[EvalFn, EvalFn]: + """Initialize evaluator functions for reinforcement learning. + + Args: + eval_env_fn (Environment): The function to Create the eval envs. + network_apply_fn (Union[ActorApply,RecActorApply]): Creates a policy to sample. + config (DictConfig): The configuration settings for the evaluation. + use_recurrent_net (bool, optional): Whether to use a rnn. Defaults to False. 
+ scanned_rnn (Optional[nn.Module], optional): The rnn module. + Required if `use_recurrent_net` is True. Defaults to None. + + Returns: + Tuple[EvalFn, EvalFn]: A tuple of two evaluation functions: + one for use during training and one for absolute metrics. + + Raises: + AssertionError: If `use_recurrent_net` is True but `scanned_rnn` is not provided. + """ + eval_env, absolute_eval_env = eval_env_fn(config, config.arch.num_eval_episodes), eval_env_fn(config, config.arch.num_eval_episodes * 10) + + # Check if win rate is required for evaluation. + log_win_rate = config.env.log_win_rate + # Vmap it over number of agents and create evaluator_fn. + if use_recurrent_net: + assert scanned_rnn is not None + evaluator = get_rnn_evaluator_fn( + eval_env, + network_apply_fn, # type: ignore + config, + scanned_rnn, + log_win_rate, + ) + absolute_metric_evaluator = get_rnn_evaluator_fn( + absolute_eval_env, + network_apply_fn, # type: ignore + config, + scanned_rnn, + log_win_rate, + ) + else: + evaluator = get_sebulba_ff_evaluator_fn( + eval_env, network_apply_fn, config, log_win_rate # type: ignore + ) + absolute_metric_evaluator = get_sebulba_ff_evaluator_fn( + absolute_eval_env, network_apply_fn, config, log_win_rate # type: ignore + ) + + return evaluator, absolute_metric_evaluator \ No newline at end of file diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py index d8cd0e9b4..f0803de4d 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -29,7 +29,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition @@ -462,7 +462,7 @@ def run_experiment(_config: DictConfig) -> float: # Setup evaluator. # One key per device for evaluation. eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor_network.apply, config) # Calculate total timesteps. config = anakin_check_total_timesteps(config) diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py index a4ddfdaa5..90fad5767 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/anakin/ppo/ff_mappo.py @@ -28,7 +28,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition @@ -459,7 +459,7 @@ def run_experiment(_config: DictConfig) -> float: # Setup evaluator. # One key per device for evaluation. eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor_network.apply, config) # Calculate total timesteps. 
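The `get_sebulba_ff_evaluator_fn` introduced above keeps stepping every evaluation environment until each has terminated at least once, and an environment's metrics are frozen the first time it finishes. A standalone NumPy illustration of that masking rule, with made-up values:

    import numpy as np

    dones = np.array([True, False, False])       # envs that already finished earlier
    next_dones = np.array([True, True, False])   # envs terminating on this step
    old_return = np.array([3.0, 0.0, 0.0])       # metrics recorded so far
    new_return = np.array([9.0, 5.0, 1.0])       # metrics reported this step

    first_done = np.logical_and(next_dones, dones == False)  # finishing for the first time
    episode_return = np.where(first_done, new_return, old_return)
    dones = np.logical_or(dones, next_dones)
    print(episode_return)  # [3. 5. 0.] -- env 0 keeps its earlier return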
config = anakin_check_total_timesteps(config) diff --git a/mava/systems/anakin/ppo/rec_ippo.py b/mava/systems/anakin/ppo/rec_ippo.py index 512a09301..583cd7acc 100644 --- a/mava/systems/anakin/ppo/rec_ippo.py +++ b/mava/systems/anakin/ppo/rec_ippo.py @@ -29,7 +29,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN @@ -613,7 +613,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 # Setup evaluator. # One key per device for evaluation. eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_eval_fns( + evaluator, absolute_metric_evaluator = make_anakin_eval_fns( eval_env=eval_env, network_apply_fn=actor_network.apply, config=config, diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py index 529a0505b..74179ab34 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/anakin/ppo/rec_mappo.py @@ -29,7 +29,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN @@ -605,7 +605,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 # Setup evaluator. # One key per device for evaluation. eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_eval_fns( + evaluator, absolute_metric_evaluator = make_anakin_eval_fns( eval_env=eval_env, network_apply_fn=actor_network.apply, config=config, diff --git a/mava/systems/anakin/q_learning/rec_iql.py b/mava/systems/anakin/q_learning/rec_iql.py index 60fd98d5c..d3566a8d5 100644 --- a/mava/systems/anakin/q_learning/rec_iql.py +++ b/mava/systems/anakin/q_learning/rec_iql.py @@ -32,7 +32,7 @@ from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import RecQNetwork, ScannedRNN from mava.systems.q_learning.types import ( ActionSelectionState, @@ -548,7 +548,7 @@ def run_experiment(cfg: DictConfig) -> float: cfg.system.num_agents = env.num_agents key, eval_key = jax.random.split(key) - evaluator, absolute_metric_evaluator = make_eval_fns( + evaluator, absolute_metric_evaluator = make_anakin_eval_fns( eval_env=eval_env, network_apply_fn=q_net.apply, config=cfg, diff --git a/mava/systems/anakin/sac/ff_isac.py b/mava/systems/anakin/sac/ff_isac.py index 7e4e20335..a3b2e5c47 100644 --- a/mava/systems/anakin/sac/ff_isac.py +++ b/mava/systems/anakin/sac/ff_isac.py @@ -31,7 +31,7 @@ from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardQNet as QNetwork from mava.systems.sac.types import ( @@ -502,7 +502,7 @@ def run_experiment(cfg: DictConfig) -> float: actor, _ = networks key, eval_key = jax.random.split(key) - evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor.apply, cfg) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor.apply, cfg) if 
cfg.logger.checkpointing.save_model: checkpointer = Checkpointer( diff --git a/mava/systems/anakin/sac/ff_masac.py b/mava/systems/anakin/sac/ff_masac.py index d5fb9172d..a319731ab 100644 --- a/mava/systems/anakin/sac/ff_masac.py +++ b/mava/systems/anakin/sac/ff_masac.py @@ -31,7 +31,7 @@ from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardQNet as QNetwork from mava.systems.sac.types import ( @@ -520,7 +520,7 @@ def run_experiment(cfg: DictConfig) -> float: actor, _ = networks key, eval_key = jax.random.split(key) - evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor.apply, cfg) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor.apply, cfg) if cfg.logger.checkpointing.save_model: checkpointer = Checkpointer( diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 0ce93cda0..229e268d0 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -32,7 +32,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +from mava.evaluator import make_sebulba_eval_fns as make_eval_fns #todo: make a standered eval function from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.sebulba.ppo.types import LearnerState, OptStates, Params, PPOTransition, Observation #todo: change this Observation to use the origial one @@ -62,7 +62,7 @@ def rollout( actor_device_id : int): #create envs - env = environments.make_gym_env(config) + env = environments.make_gym_env(config, config.arch.num_envs) #setup len_executor_device_ids = len(config.arch.executor_device_ids) @@ -93,7 +93,7 @@ def get_action_and_value( def prepare_data(storage: List[PPOTransition]) -> PPOTransition: """Prepare data to share with learner.""" return jax.tree_map( # type: ignore - lambda *xs: jnp.stack(xs), *storage + lambda *xs : jnp.stack(xs), *storage ) @@ -102,73 +102,75 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: rollout_time: deque = deque(maxlen=10) rollout_queue_put_time: deque = deque(maxlen=10) - next_obs , info = env.reset() + next_obs , info = env.reset() #todo : the first info is discarded , is that a problem? next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) move_to_device = lambda x : jax.device_put(x, device = current_actor_device) # Loop till the learner has finished training - for update in range(1, config.system.num_updates + 2): - # Setup - env_recv_time: float = 0 - inference_time: float = 0 - storage_time: float = 0 - env_send_time: float = 0 - - # Get the latest parameters from the learner - params_queue_get_time_start = time.time() - params = params_queue.get() - params_queue_get_time.append(time.time() - params_queue_get_time_start) - - # Rollout - rollout_time_start = time.time() - storage: List = [] - # Loop over the rollout length - for _ in range(0, config.system.rollout_length): - # Cached for transition - cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) - cached_next_dones = move_to_device(next_dones) - cashed_action_mask = move_to_device(jnp.stack([*info["actions_mask"]], axis = 0) ) #unpack the numpy object, find a more pythonic way? 
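The `prepare_data` helper above, `jax.tree_map(lambda *xs: jnp.stack(xs), *storage)`, collapses a Python list of per-step transitions into a single pytree whose leaves gain a leading time axis. A self-contained toy version (the `Step` tuple is a stand-in, not the real PPOTransition):

    from typing import NamedTuple

    import jax
    import jax.numpy as jnp

    class Step(NamedTuple):
        reward: jnp.ndarray  # (num_envs, num_agents)
        done: jnp.ndarray    # (num_envs, num_agents)

    storage = [Step(jnp.full((4, 2), t, jnp.float32), jnp.zeros((4, 2), bool)) for t in range(3)]
    batched = jax.tree_map(lambda *xs: jnp.stack(xs), *storage)
    print(batched.reward.shape)  # (3, 4, 2): (rollout_length, num_envs, num_agents)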
- - # Increment current timestep - t_env += ( - config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs - ) + for eval_step in range(config.arch.num_evaluation): + for update in range(1, config.system.num_updates_per_eval + 2): + # Setup + env_recv_time: float = 0 + inference_time: float = 0 + storage_time: float = 0 + env_send_time: float = 0 - # Get action and value - inference_time_start = time.time() - # - ( - action, - log_prob, - value, - key, - ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) - inference_time += time.time() - inference_time_start + # Get the latest parameters from the learner + params_queue_get_time_start = time.time() + params = params_queue.get() + params_queue_get_time.append(time.time() - params_queue_get_time_start) - # Step the environment - env_send_time_start = time.time() - cpu_action = jax.device_get(action) - next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) #num_env, num_agents --> num_agents, num_env - next_dones = np.logical_or(terminated, truncated) - - # Append data to storage - env_send_time += time.time() - env_send_time_start - storage_time_start = time.time() - storage.append( - PPOTransition( - done=cached_next_dones, - action=action, - value=value, - reward=next_reward, - log_prob=log_prob, - obs=Observation(cached_next_obs, cashed_action_mask), - info={"win_rate" : info.get("win_rate")}, - )#todo: use a threadsafe alt https://github.com/instadeepai/CityLearn/blob/27e69f8ebdf1789c55ffab5c326bfaa50733a5e7/power_systems/sax_sebulba.py#L39 - ) - storage_time += time.time() - storage_time_start + # Rollout + rollout_time_start = time.time() + storage: List = [] + # Loop over the rollout length + for _ in range(0, config.system.rollout_length): + # Cached for transition + cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) + cached_next_dones = move_to_device(next_dones) + cashed_action_mask = move_to_device(jnp.stack([*info["actions_mask"]], axis = 0) ) #unpack the numpy object, find a more pythonic way? 
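The caching lines above re-stack the per-environment observations coming out of the vector env into an agent-leading layout before placing them on the actor device, and the sampled actions are axis-swapped again before `env.step`. A small NumPy shape check with illustrative sizes (not the wrapper's real shapes):

    import numpy as np

    num_envs, num_agents, obs_dim = 4, 2, 5
    per_env_obs = [np.zeros((num_agents, obs_dim)) for _ in range(num_envs)]

    stacked = np.stack(per_env_obs, axis=1)     # (num_agents, num_envs, obs_dim)
    actions = np.zeros((num_agents, num_envs))  # one action per agent per env
    env_actions = actions.swapaxes(0, 1)        # flip to the env-leading layout
    print(stacked.shape, env_actions.shape)     # (2, 4, 5) (4, 2)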
+ + # Increment current timestep + t_env += ( + config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs + ) + + # Get action and value + inference_time_start = time.time() + # + ( + action, + log_prob, + value, + key, + ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) + inference_time += time.time() - inference_time_start + + # Step the environment + env_send_time_start = time.time() + cpu_action = jax.device_get(action) + next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) #num_env, num_agents --> num_agents, num_env + next_dones = np.logical_or(terminated, truncated) + + metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics (N_envs , N_metrics) -- > (N_metrics, N_envs) + # Append data to storage + env_send_time += time.time() - env_send_time_start + storage_time_start = time.time() + storage.append( + PPOTransition( + done=cached_next_dones, + action=action, + value=value, + reward=next_reward, + log_prob=log_prob, + obs=Observation(cached_next_obs, cashed_action_mask), + info=metrics, + )#todo: use a threadsafe alt https://github.com/instadeepai/CityLearn/blob/27e69f8ebdf1789c55ffab5c326bfaa50733a5e7/power_systems/sax_sebulba.py#L39 + ) + storage_time += time.time() - storage_time_start - rollout_time.append(time.time() - rollout_time_start) + rollout_time.append(time.time() - rollout_time_start) # Prepare data to share with learner # todo: investigate te thread --> single learning @@ -446,7 +448,7 @@ def learner_setup( n_devices = len(learner_devices) #create temporory envoirnments. - env = environments.make_gym_env(config) + env = environments.make_gym_env(config, config.arch.num_envs) # Get number of agents and actions. action_space = env.single_action_space config.system.num_agents = len(action_space) @@ -562,8 +564,7 @@ def run_experiment(_config: DictConfig) -> float: # Setup evaluator. # One key per device for evaluation. - #eval_keys = jax.random.split(key_e, n_devices) # todo: well add the evaluations :) - #evaluator, absolute_metric_evaluator = make_eval_fns(eval_env, actor_network.apply, config) + evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config) #todo: make this more generic # Calculate total timesteps. 
config = sebulba_check_total_timesteps(config) #todo: update this for sebulba @@ -576,9 +577,9 @@ def run_experiment(_config: DictConfig) -> float: steps_per_rollout = ( len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor - * config.system.num_updates_per_eval * config.system.rollout_length * config.arch.num_envs + * config.system.num_updates_per_eval ) # Logger setup @@ -633,7 +634,7 @@ def run_experiment(_config: DictConfig) -> float: best_params = None for eval_step in range(config.arch.num_evaluation): #todo : place holder trainer_update_number += 1 - rollout_queue_get_time_start = time.time() + start_time = time.time() sharded_storages = [] sharded_next_obss = [] sharded_next_dones = [] @@ -656,23 +657,17 @@ def run_experiment(_config: DictConfig) -> float: sharded_next_obss.append(sharded_next_obs) sharded_next_dones.append(sharded_next_done) sharded_next_action_masks.append(sharded_next_action_mask) - rollout_queue_get_time.append(time.time() - rollout_queue_get_time_start) + rollout_queue_get_time.append(time.time() - start_time) training_time_start = time.time() #Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) + sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) #todo: check if this breaks the explicet array device placment sharded_next_obss = jnp.concatenate(sharded_next_obss, axis = 1) sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) sharded_next_action_masks = jnp.concatenate(sharded_next_action_masks, axis = 1) learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_action_masks, sharded_next_dones) - # Log the results of the training. - elapsed_time = time.time() - rollout_queue_get_time_start - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time - # Send updated params to executors unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) for d_idx, d_id in enumerate(config.arch.executor_device_ids): @@ -682,13 +677,36 @@ def run_experiment(_config: DictConfig) -> float: device_params ) + # Log the results of the training. + elapsed_time = time.time() - start_time + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) # todo: these shapes are not as expected + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + # Separately log timesteps, actoring metrics and training metrics. logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) if ep_completed: # only log episode metrics if an episode was completed in the rollout. logger.log(episode_metrics, t, eval_step, LogEvent.ACT) logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + # Evaluation on the learner + key_e, eval_key = jax.random.split(key_e, 2) + episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) + + # Log the results of the evaluation. 
+ elapsed_time = time.time() - start_time + episode_return = jnp.mean(episode_metrics["episode_return"]) + steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) + + #todo: add saving + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(learner_output.learner_state.params) + max_episode_return = episode_return + #todo: abs metric return None#eval_performance diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py index adc15dcc7..5e45544f1 100644 --- a/mava/systems/sebulba/ppo/test.py +++ b/mava/systems/sebulba/ppo/test.py @@ -5,6 +5,8 @@ import threading import chex import flax +import gym.vector +import gym.vector.async_vector_env import hydra import jax import jax.numpy as jnp @@ -18,7 +20,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_eval_fns +#from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this @@ -36,23 +38,41 @@ from mava.wrappers.episode_metrics import get_final_step_metrics from flax import linen as nn import gym -from mava.wrappers import GymRwareWrapper +import rware +from mava.wrappers import GymRwareWrapper, GymRecordEpisodeMetrics, _multiagent_worker_shared_memory @hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. 
- OmegaConf.set_struct(cfg, False) - + - base = gym.make(cfg.env.scenario) - base = GymRwareWrapper(base, cfg.env.use_individual_rewards, False, True) + OmegaConf.set_struct(cfg, False) + def f(): + base = gym.make(cfg.env.scenario) + base = GymRwareWrapper(base, cfg.env.use_individual_rewards, False, True) + return GymRecordEpisodeMetrics(base) + + base = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names + [ + lambda: f() + for _ in range(3) + ], + worker=_multiagent_worker_shared_memory + ) base.reset() - ree = base.step([0,0]) - print(ree) - env = environments.make_gym_env(cfg) - a = env.reset() - print(a) - b = env.step([[0,0], [0,0], [0,0], [0,0]]) + n = 0 + done = False + while not done: + n+= 1 + agents_view, reward, terminated, truncated, info = base.step([[0,0,0], [0,0,0]]) + done = np.logical_or(terminated, truncated).all() + metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) + print(n, done, terminated, np.logical_or(terminated, truncated).shape, metrics) + done = True + base.close() + print(done) + + #print(b) #r = 1+1 # Create a sample input @@ -60,4 +80,4 @@ def hydra_entry_point(cfg: DictConfig) -> float: #env.reset() #a = env.step(jnp.ones((4))) -hydra_entry_point() \ No newline at end of file +hydra_entry_point() diff --git a/mava/utils/logger.py b/mava/utils/logger.py index 4edad361e..8273e44a2 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -337,7 +337,7 @@ def get_logger_path(config: DictConfig, logger_type: str) -> str: def describe(x: ArrayLike) -> Union[Dict[str, ArrayLike], ArrayLike]: """Generate summary statistics for an array of metrics (mean, std, min, max).""" - if not isinstance(x, jax.Array) or x.size <= 1: + if not (isinstance(x, jax.Array) or isinstance(x, np.ndarray)) or x.size <= 1: return x # np instead of jnp because we don't jit here diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 69fc54623..cab649880 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -46,6 +46,7 @@ GigastepWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, + _multiagent_worker_shared_memory, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -208,7 +209,7 @@ def make_gigastep_env( def make_gym_env( - config: DictConfig, add_global_state: bool = False, eval_env: bool = False + config: DictConfig, num_env : int, add_global_state: bool = False, eval_env: bool = False ) -> Environment: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. @@ -230,8 +231,8 @@ def create_gym_env( env = gym.make(config.env.scenario) wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) if not config.env.implicit_agent_id: - pass # todo : add agent id wrapper for gym . - env = GymRecordEpisodeMetrics(env) + wrapped_env = AgentIDWrapper(wrapped_env) # todo : add agent id wrapper for gym . 
+ wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env num_env = config.arch.num_envs @@ -239,7 +240,8 @@ def create_gym_env( [ lambda: create_gym_env(config, add_global_state, eval_env=eval_env) for _ in range(num_env) - ] + ], + worker=_multiagent_worker_shared_memory ) return envs diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index e888d9317..3608b1d10 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper +from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, _multiagent_worker_shared_memory from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, diff --git a/mava/wrappers/episode_metrics.py b/mava/wrappers/episode_metrics.py index a2b0fdb37..a46dc1b91 100644 --- a/mava/wrappers/episode_metrics.py +++ b/mava/wrappers/episode_metrics.py @@ -75,7 +75,7 @@ def step( # Previous episode return/length until done and then the next episode return. episode_return_info = state.episode_return * not_done + new_episode_return * done episode_length_info = state.episode_length * not_done + new_episode_length * done - + timestep.extras["episode_metrics"] = { "episode_return": episode_return_info, "episode_length": episode_length_info, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 69632f1bc..546e05614 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -13,17 +13,21 @@ # limitations under the License. import warnings -from typing import Dict, Tuple +from typing import Dict, Tuple, Optional import gym import numpy as np from numpy.typing import NDArray +from gym.spaces import Box +from gym.vector.utils import write_to_shared_memory +import sys + # Filter out the warnings warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") -class GymRwareWrapper(gym.Wrapper): +class GymRwareWrapper(gym.Wrapper): """Wrapper for rware gym environments""" def __init__( @@ -44,7 +48,7 @@ def __init__( Defaults to False. """ super().__init__(env) - self._env = gym.wrappers.compatibility.EnvCompatibility(env) + self._env = env #not having _env leaded tp self.env getting replaced --> circular called self.use_individual_rewards = use_individual_rewards self.add_global_state = add_global_state # todo : add the global observations self.eval_env = eval_env @@ -52,42 +56,33 @@ def __init__( self.num_actions = self._env.action_space[ 0 ].n # todo: all the agents must have the same num_actions, add assertion? 
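The rware wrapper's `step` and `get_actions_mask` methods that follow either pass the per-agent rewards through or broadcast the team mean to every agent, and fall back to an all-ones mask when the underlying env exposes no action mask. In isolation that logic is just (toy values):

    import numpy as np

    num_agents, num_actions = 4, 5
    reward = [0.0, 1.0, 0.0, 3.0]

    individual = np.array(reward)                               # use_individual_rewards=True
    shared = np.array([np.array(reward).mean()] * num_agents)   # team mean, repeated per agent
    default_mask = np.ones((num_agents, num_actions), dtype=np.float32)
    print(individual, shared, default_mask.shape)  # ... [1. 1. 1. 1.] (4, 5)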
- - def reset(self) -> Tuple: - (agents_view, info), _ = self._env.reset( - seed=np.random.randint(1) - ) # todo: assure reproducibility, this only works for rware - - info = {"actions_mask": self._get_actions_mask(info)} + + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple: + + if seed is not None: + self.env.seed(seed) + + agents_view, info = self._env.reset() + + info = {"actions_mask": self.get_actions_mask(info)} return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: - - agents_view, reward, terminated, truncated, info = self.env.step(actions) + def step(self, actions: NDArray) -> Tuple: #Vect auto rest - done = np.logical_or(terminated, truncated).all() + agents_view, reward, terminated, truncated, info = self._env.step(actions) - if ( - done and not self.eval_env - ): # only auto-reset in training envs, same functionality as the AutoResetWrapper. - agents_view, info = self.reset() - reward = np.zeros(self.num_agents) - terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( - self.num_agents, dtype=bool - ) - return agents_view, reward, terminated, truncated, info - - info = {"actions_mask": self._get_actions_mask(info)} + info = {"actions_mask": self.get_actions_mask(info)} if self.use_individual_rewards: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - return agents_view, reward, terminated, truncated, info - def _get_actions_mask(self, info: Dict) -> NDArray: + def get_actions_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) @@ -98,51 +93,151 @@ class GymRecordEpisodeMetrics(gym.Wrapper): def __init__(self, env: gym.Env): super().__init__(env) + self._env = env self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 + self.running_count_episode_length = 0.0 def reset(self) -> Tuple: # Reset the env - agents_view, info = self.env.reset() + agents_view, info = self._env.reset() - # Reset the metrics - self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 + # Handle the Done when the auto reset happens + done = self.running_count_episode_length != -1 # Avoid setting the first ever done to True # Create the metrics dict metrics = { "episode_return": self.running_count_episode_return, - "episode_length": self.self.running_count_episode_length, - "is_terminal_step": False, + "episode_length": self.running_count_episode_length, + "is_terminal_step": done, } + + # Reset the metrics + self.running_count_episode_return = 0.0 + self.running_count_episode_length = 0 + if "won_episode" in info: metrics["won_episode"] = info["won_episode"] + + info["metrics"] = metrics - return agents_view, metrics + return agents_view, info def step(self, actions: NDArray) -> Tuple: # Step the env - agents_view, reward, terminated, truncated, info = self.env.step(actions) + agents_view, reward, terminated, truncated, info = self._env.step(actions) - # Update the metrics - done = np.logical_or(terminated, truncated).all() - - if not done: - self.running_count_episode_return += float(np.mean(reward)) - self.running_count_episode_length += 1 - - else: - self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 + self.running_count_episode_return += float(np.mean(reward)) + self.running_count_episode_length += 1 metrics = { "episode_return": self.running_count_episode_return, - "episode_length": 
self.self.running_count_episode_length, - "is_terminal_step": False, + "episode_length": self.running_count_episode_length, + "is_terminal_step": False, # We handle the True case in the reset function since this gets overwritten } if "won_episode" in info: metrics["won_episode"] = info["won_episode"] + + info["metrics"] = metrics + + return agents_view, reward, terminated, truncated, info + +class AgentIDWrapper(gym.Wrapper): + """Add onehot agent IDs to observation.""" + + def __init__(self, env: gym.Env): + super().__init__(env) - return agents_view, reward, terminated, truncated, metrics + self.agent_ids = np.eye(self.env.num_agents) + _obs_low, _obs_high, _obs_dtype, _obs_shape = ( + self.env.observation_space.low[0][0], + self.env.observation_space.high[0][0], + self.env.observation_space.dtype, + self.env.observation_space.shape, + ) + _new_obs_shape = (self.env.num_agents, _obs_shape[1] + self.env.num_agents) + self._observation_space = Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype) + + def reset(self) -> Tuple[np.ndarray, Dict]: + """Reset the environment.""" + obs, info = self.env.reset() + obs = np.concatenate([self.agent_ids, obs], axis=1) + return obs, info + + def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: + """Step the environment.""" + obs, reward, terminated, truncated, info = self.env.step(action) + obs = np.concatenate([self.agent_ids, obs], axis=1) + return obs, reward, terminated, truncated, info + + +def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): + assert shared_memory is not None + env = env_fn() + observation_space = env.observation_space + parent_pipe.close() + try: + while True: + command, data = pipe.recv() + if command == "reset": + observation, info = env.reset(**data) + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + pipe.send(((None, info), True)) + + elif command == "step": + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + if np.logical_or(terminated, truncated).all(): + old_observation, old_info = observation, info + observation, info = env.reset() + info["final_observation"] = old_observation + info["final_info"] = old_info + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + pipe.send(((None, reward, terminated, truncated, info), True)) + elif command == "seed": + env.seed(data) + pipe.send((None, True)) + elif command == "close": + pipe.send((None, True)) + break + elif command == "_call": + name, args, kwargs = data + if name in ["reset", "step", "seed", "close"]: + raise ValueError( + f"Trying to call function `{name}` with " + f"`_call`. Use `{name}` directly instead." + ) + function = getattr(env, name) + if callable(function): + pipe.send((function(*args, **kwargs), True)) + else: + pipe.send((function, True)) + elif command == "_setattr": + name, value = data + setattr(env, name, value) + pipe.send((None, True)) + elif command == "_check_spaces": + pipe.send( + ((data[0] == observation_space, data[1] == env.action_space), True) + ) + else: + raise RuntimeError( + f"Received unknown command `{command}`. Must " + "be one of {`reset`, `step`, `seed`, `close`, `_call`, " + "`_setattr`, `_check_spaces`}." 
+ ) + except (KeyboardInterrupt, Exception): + error_queue.put((index,) + sys.exc_info()[:2]) + pipe.send((None, False)) + finally: + env.close() \ No newline at end of file From 7f43a33b63a63fbab41f4ce5673374ff76d4667f Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 2 Jul 2024 14:47:24 +0100 Subject: [PATCH 022/139] fix: logging and added LBF --- mava/configs/arch/sebulba.yaml | 10 +- mava/configs/env/gym.yaml | 6 +- mava/configs/system/ppo/ff_ippo.yaml | 8 +- mava/systems/sebulba/ppo/ff_ippo.py | 325 ++++++++++++++++----------- mava/systems/sebulba/ppo/test.py | 15 +- mava/utils/make_env.py | 17 +- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 86 +++++-- 8 files changed, 291 insertions(+), 178 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 02ae56bb3..617e54134 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,13 +1,13 @@ # --- Sebulba config --- arch_name: "sebulba" -num_envs: 64 # number of envs per thread +num_envs: 3 # number of envs per thread # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. -num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. +num_evaluation: 10 # Number of evenly spaced evaluations to perform during training. absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 @@ -16,9 +16,3 @@ n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices -# --- Sebulba rollout and env config --- -concurrency: False # whether actor and learner should run concurrently -async_envs: True # "whether to use async vector or sync vector envs" - -# --- To be defined during training --- -log_frequency: ~ diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml index 44c9c624a..9ddd16d41 100644 --- a/mava/configs/env/gym.yaml +++ b/mava/configs/env/gym.yaml @@ -1,8 +1,8 @@ # ---Environment Configs--- -scenario: rware:rware-tiny-2ag-v1 # [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] +scenario: rware:rware-tiny-4ag-v1 #Foraging-8x8-2p-1f-v2 #rware:rware-tiny-2ag-v1 # [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] -env_name: RobotWarehouse # Used for logging purposes. +env_name: RobotWarehouse #LevelBasedForaging # Used for logging purposes. # Defines the metric that will be used to evaluate the performance of the agent. # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. @@ -10,7 +10,7 @@ eval_metric: episode_return # Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. # This should not be changed. -implicit_agent_id: True +implicit_agent_id: False # Whether or not to log the winrate of this environment. This should not be changed as not all # environments have a winrate metric. 
log_win_rate: False diff --git a/mava/configs/system/ppo/ff_ippo.yaml b/mava/configs/system/ppo/ff_ippo.yaml index b8d0573b4..0c93c2683 100644 --- a/mava/configs/system/ppo/ff_ippo.yaml +++ b/mava/configs/system/ppo/ff_ippo.yaml @@ -1,16 +1,16 @@ # --- Defaults FF-IPPO --- -total_timesteps: 20_000_000 # Set the total environment steps. +total_timesteps: ~ # Set the total environment steps. # If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value. -num_updates: 1000 # Number of updates +num_updates: 12 # Number of updates seed: 42 # --- Agent observations --- add_agent_id: True # --- RL hyperparameters --- -actor_lr: 2.5e-4 # Learning rate for actor network -critic_lr: 2.5e-4 # Learning rate for critic network +actor_lr: 1.0e-3 # Learning rate for actor network +critic_lr: 1.0e-3 # Learning rate for critic network update_batch_size: 2 # Number of vectorised gradient updates per device. rollout_length: 128 # Number of environment steps per vectorised environment. ppo_epochs: 4 # Number of ppo epochs per training data batch. diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 229e268d0..5df32bf5d 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -68,7 +68,7 @@ def rollout( len_executor_device_ids = len(config.arch.executor_device_ids) current_actor_device = jax.devices()[actor_device_id] t_env = 0 - start_time = time.time() + actor_apply_fn, critic_apply_fn = apply_fns @@ -98,9 +98,9 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: # Define queues to track time - params_queue_get_time: deque = deque(maxlen=10) - rollout_time: deque = deque(maxlen=10) - rollout_queue_put_time: deque = deque(maxlen=10) + params_queue_get_time: deque = deque(maxlen=1) + rollout_time: deque = deque(maxlen=1) + rollout_queue_put_time: deque = deque(maxlen=1) next_obs , info = env.reset() #todo : the first info is discarded , is that a problem? 
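The rollout thread reworked here blocks on a params queue for fresh parameters, collects a rollout, and pushes it onto a rollout queue for the learner, which in turn publishes updated parameters. A minimal, self-contained sketch of that handshake (plain queue/threading stand-ins, not Mava's actual objects):

    import queue
    import threading

    params_q: queue.Queue = queue.Queue(maxsize=1)
    rollout_q: queue.Queue = queue.Queue(maxsize=1)

    def rollout_thread(num_updates: int) -> None:
        for _ in range(num_updates):
            params = params_q.get()                   # wait for the latest params
            trajectory = f"trajectory from {params}"  # stand-in for stepping the envs
            rollout_q.put(trajectory)                 # hand the data to the learner

    threading.Thread(target=rollout_thread, args=(3,), daemon=True).start()

    params = "params-0"
    for update in range(3):
        params_q.put(params)              # publish params to the executor
        data = rollout_q.get()            # block until a rollout arrives
        params = f"params-{update + 1}"   # pretend a gradient update happened
        print(data)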
next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) @@ -108,70 +108,77 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: move_to_device = lambda x : jax.device_put(x, device = current_actor_device) # Loop till the learner has finished training - for eval_step in range(config.arch.num_evaluation): - for update in range(1, config.system.num_updates_per_eval + 2): - # Setup - env_recv_time: float = 0 - inference_time: float = 0 - storage_time: float = 0 - env_send_time: float = 0 + for update in range(config.system.num_updates): + print(update) + # Setup todo: double check tracking times + inference_time: float = 0 + storage_time: float = 0 + env_send_time: float = 0 + setup = 0 - # Get the latest parameters from the learner - params_queue_get_time_start = time.time() - params = params_queue.get() - params_queue_get_time.append(time.time() - params_queue_get_time_start) + # Get the latest parameters from the learner + params_queue_get_time_start = time.time() + params = params_queue.get() + params_queue_get_time.append(time.time() - params_queue_get_time_start) + + # Rollout + rollout_time_start = time.time() + storage: List = [] + + # Loop over the rollout length + for _ in range(0, config.system.rollout_length): - # Rollout - rollout_time_start = time.time() - storage: List = [] - # Loop over the rollout length - for _ in range(0, config.system.rollout_length): - # Cached for transition - cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) - cached_next_dones = move_to_device(next_dones) - cashed_action_mask = move_to_device(jnp.stack([*info["actions_mask"]], axis = 0) ) #unpack the numpy object, find a more pythonic way? - - # Increment current timestep - t_env += ( - config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs - ) - - # Get action and value - inference_time_start = time.time() - # - ( - action, - log_prob, - value, - key, - ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) - inference_time += time.time() - inference_time_start - - # Step the environment - env_send_time_start = time.time() - cpu_action = jax.device_get(action) - next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) #num_env, num_agents --> num_agents, num_env - next_dones = np.logical_or(terminated, truncated) - - metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics (N_envs , N_metrics) -- > (N_metrics, N_envs) - # Append data to storage - env_send_time += time.time() - env_send_time_start - storage_time_start = time.time() - storage.append( - PPOTransition( - done=cached_next_dones, - action=action, - value=value, - reward=next_reward, - log_prob=log_prob, - obs=Observation(cached_next_obs, cashed_action_mask), - info=metrics, - )#todo: use a threadsafe alt https://github.com/instadeepai/CityLearn/blob/27e69f8ebdf1789c55ffab5c326bfaa50733a5e7/power_systems/sax_sebulba.py#L39 - ) - storage_time += time.time() - storage_time_start + # Cached for transition + cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) + cached_next_dones = move_to_device(next_dones) + setup_start = time.time() + cashed_action_mask = move_to_device(np.stack(info["actions_mask"]) ) + setup += time.time() - setup_start + # Increment current timestep + t_env += ( + config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs + ) + + # Get action and value + inference_time_start = time.time() + # + ( + 
action, + log_prob, + value, + key, + ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) + inference_time += time.time() - inference_time_start + + # Step the environment + env_send_time_start = time.time() + cpu_action = jax.device_get(action) - rollout_time.append(time.time() - rollout_time_start) - + next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) #num_env, num_agents --> num_agents, num_env + env_send_time += time.time() - env_send_time_start + + + storage_time_start = time.time() + # Prepare the data + next_dones = np.logical_or(terminated, truncated) + metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics (N_envs , N_metrics) -- > (N_metrics, N_envs) + + # Append data to storage + storage.append( + PPOTransition( + done=cached_next_dones, + action=action, + value=value, + reward=next_reward, + log_prob=log_prob, + obs=Observation(cached_next_obs, cashed_action_mask), + info=metrics, + )#todo: use a threadsafe alt https://github.com/instadeepai/CityLearn/blob/27e69f8ebdf1789c55ffab5c326bfaa50733a5e7/power_systems/sax_sebulba.py#L39 + ) + storage_time += time.time() - storage_time_start + rollout_time.append(time.time() - rollout_time_start) + + parse_timer = time.time() # Prepare data to share with learner # todo: investigate te thread --> single learning partitioned_storage = prepare_data(storage) @@ -184,15 +191,27 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: sharded_next_action_mask = shard_split_payload(jnp.stack([*info["actions_mask"]], axis = 0), 0) sharded_next_done = shard_split_payload(next_dones, 0) + + speed_info = { + "rollout_time": np.mean(rollout_time), + "params_queue_get_time": np.mean(params_queue_get_time), + "action_inference": inference_time, + "storage_time": storage_time, + "env_step_time": env_send_time, + "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, + "parse_time" : time.time() - parse_timer, + "setup_time" : setup, + } + #print(speed_info) + payload = ( t_env, sharded_storage, sharded_next_obs, sharded_next_done, - sharded_next_action_mask, - np.mean(params_queue_get_time), + sharded_next_action_mask ) - + # Put data in the rollout queue to share it with the learner rollout_queue_put_time_start = time.time() rollout_queue.put(payload) @@ -210,7 +229,7 @@ def get_learner_fn( actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, _: Any, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: + def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: """A single update of the network. This function steps the environment and records the trajectory batch for @@ -340,7 +359,7 @@ def _critic_loss_fn( # available at https://tinyurl.com/26tdzs5x # pmean over devices. actor_grads, actor_loss_info = jax.lax.pmean( - (actor_grads, actor_loss_info), axis_name="device" + (actor_grads, actor_loss_info), axis_name="device" #todo: pmean over learner devices not all ) # pmean over devices. 
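On the gradient aggregation above (and the TODO about restricting it to learner devices): inside a pmapped update, `jax.lax.pmean` averages each device's gradients over every device participating in that pmap. A standalone sketch of the pattern, independent of the system code:

    import jax
    import jax.numpy as jnp

    def per_device_update(x):
        grads = jax.grad(lambda v: jnp.sum(v ** 2))(x)
        # Average this device's gradients across all devices in the pmap.
        return jax.lax.pmean(grads, axis_name="device")

    update = jax.pmap(per_device_update, axis_name="device")
    n = jax.local_device_count()
    xs = jnp.arange(n * 3, dtype=jnp.float32).reshape(n, 3)
    print(update(xs))  # every device now holds the cross-device mean of 2*x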
@@ -406,7 +425,7 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state learner_state = LearnerState(params, opt_states, key, None, None) - metric = traj_batch.info #todo: metrci calcualtions + metric = traj_batch.info return learner_state, (metric, loss_info) def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: @@ -424,12 +443,9 @@ def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs - env_state (LogEnvState): The environment state. - timesteps (TimeStep): The initial timestep in the initial trajectory. """ - # Broadcast static parameters for scan - partial_update_step = lambda learner_state, xs : _update_step(learner_state, xs, traj_batch , last_obs, last_action_mask, last_dones) + - learner_state, (episode_info, loss_info) = jax.lax.scan( - partial_update_step, learner_state, None, config.system.num_updates_per_eval - ) + learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_action_mask, last_dones) return ExperimentOutput( learner_state=learner_state, @@ -534,15 +550,13 @@ def run_experiment(_config: DictConfig) -> float: """Runs experiment.""" config = copy.deepcopy(_config) - devices = jax.devices() # todo: use local devices insted? + devices = jax.devices() learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] # PRNG keys. key, key_e, actor_net_key, critic_net_key = jax.random.split( jax.random.PRNGKey(config.system.seed), num=4 ) - - learner_keys = jax.device_put_replicated(key, learner_devices) # Sanity check of config assert ( @@ -624,77 +638,94 @@ def run_experiment(_config: DictConfig) -> float: learner_devices, d_id, ), - ).start() + ).start() #todo : this is techinically only multu threaded not multi processepr? # Run experiment for the total number of updates. 
- rollout_queue_get_time: deque = deque(maxlen=10) - data_transfer_time: deque = deque(maxlen=10) - trainer_update_number = 0 max_episode_return = jnp.float32(0.0) best_params = None - for eval_step in range(config.arch.num_evaluation): #todo : place holder - trainer_update_number += 1 - start_time = time.time() - sharded_storages = [] - sharded_next_obss = [] - sharded_next_dones = [] - sharded_next_action_masks = [] - - # Loop through each executor device - for d_idx, _ in enumerate(config.arch.executor_device_ids): - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - # Get data from rollout queue - ( - t_env, - sharded_storage, - sharded_next_obs, - sharded_next_done, - sharded_next_action_mask, - avg_params_queue_get_time, - ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() - sharded_storages.append(sharded_storage) - sharded_next_obss.append(sharded_next_obs) - sharded_next_dones.append(sharded_next_done) - sharded_next_action_masks.append(sharded_next_action_mask) - rollout_queue_get_time.append(time.time() - start_time) - training_time_start = time.time() + for eval_step in range(config.arch.num_evaluation): + training_start_time = time.time() + learner_speeds = [] + rollout_times = [] - #Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) #todo: check if this breaks the explicet array device placment - sharded_next_obss = jnp.concatenate(sharded_next_obss, axis = 1) - sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) - sharded_next_action_masks = jnp.concatenate(sharded_next_action_masks, axis = 1) - - learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_action_masks, sharded_next_dones) + episode_metrics = [] + train_metrics = [] - # Send updated params to executors - unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) - for d_idx, d_id in enumerate(config.arch.executor_device_ids): - device_params = jax.device_put(unreplicated_params, devices[d_id]) - for thread_id in range(config.arch.n_threads_per_executor): - params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( - device_params - ) - + for update in range(config.system.num_updates_per_eval): + sharded_storages = [] + sharded_next_obss = [] + sharded_next_dones = [] + sharded_next_action_masks = [] + + rollout_start_time = time.time() + # Loop through each executor device + for d_idx, _ in enumerate(config.arch.executor_device_ids): + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + # Get data from rollout queue + ( + t_env, + sharded_storage, + sharded_next_obs, + sharded_next_done, + sharded_next_action_mask + ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() + sharded_storages.append(sharded_storage) + sharded_next_obss.append(sharded_next_obs) + sharded_next_dones.append(sharded_next_done) + sharded_next_action_masks.append(sharded_next_action_mask) + + rollout_times.append(time.time() - rollout_start_time) + + + # Concatinate the returned trajectories on the n_env axis + sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) #todo: check if this breaks the explicet array device placment + sharded_next_obss = jnp.concatenate(sharded_next_obss, axis = 1) + sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) + 
sharded_next_action_masks = jnp.concatenate(sharded_next_action_masks, axis = 1) + + + learner_start_time = time.time() + learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_action_masks, sharded_next_dones) + learner_speeds.append(time.time() - learner_start_time) + + # Stack the metrics + episode_metrics.append(learner_output.episode_metrics) + train_metrics.append(learner_output.train_metrics) + + # Send updated params to executors + unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) + for d_idx, d_id in enumerate(config.arch.executor_device_ids): + device_params = jax.device_put(unreplicated_params, devices[d_id]) + for thread_id in range(config.arch.n_threads_per_executor): + params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( + device_params + ) + + + # Log the results of the training. - elapsed_time = time.time() - start_time + elapsed_time = time.time() - training_start_time t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) # todo: these shapes are not as expected - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + episode_metrics = jax.tree_map(lambda *x : np.asarray(x), *episode_metrics) + episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time # Separately log timesteps, actoring metrics and training metrics. - logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) + speed_info = {"total_time" : elapsed_time, "rollout_time" : np.sum(rollout_times), "learner_time" : np.sum(learner_speeds), "timestep" : t} + logger.log(speed_info , t, eval_step, LogEvent.MISC) if ep_completed: # only log episode metrics if an episode was completed in the rollout. - logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + train_metrics = jax.tree_map(lambda *x : np.asarray(x), *train_metrics) + logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) # Evaluation on the learner + evaluation_start_timer = time.time() key_e, eval_key = jax.random.split(key_e, 2) episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) # Log the results of the evaluation. - elapsed_time = time.time() - start_time + elapsed_time = time.time() - evaluation_start_timer episode_return = jnp.mean(episode_metrics["episode_return"]) steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) @@ -706,8 +737,32 @@ def run_experiment(_config: DictConfig) -> float: if config.arch.absolute_metric and max_episode_return <= episode_return: best_params = copy.deepcopy(learner_output.learner_state.params) max_episode_return = episode_return - #todo: abs metric - return None#eval_performance + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) + + # Measure absolute metric. 
+ if config.arch.absolute_metric: + start_time = time.time() + + key_e, eval_key = jax.random.split(key_e, 2) + episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params.actor_params, 1), eval_key) + + elapsed_time = time.time() - start_time + steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + @hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py index 5e45544f1..d1f34fccf 100644 --- a/mava/systems/sebulba/ppo/test.py +++ b/mava/systems/sebulba/ppo/test.py @@ -39,7 +39,8 @@ from flax import linen as nn import gym import rware -from mava.wrappers import GymRwareWrapper, GymRecordEpisodeMetrics, _multiagent_worker_shared_memory +import lbforaging +from mava.wrappers import GymRwareWrapper, GymRecordEpisodeMetrics, _multiagent_worker_shared_memory, GymAgentIDWrapper, GymLBFWrapper @hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" @@ -49,7 +50,8 @@ def hydra_entry_point(cfg: DictConfig) -> float: OmegaConf.set_struct(cfg, False) def f(): base = gym.make(cfg.env.scenario) - base = GymRwareWrapper(base, cfg.env.use_individual_rewards, False, True) + base = GymLBFWrapper(base, cfg.env.use_individual_rewards, True) + base = GymAgentIDWrapper(base) return GymRecordEpisodeMetrics(base) base = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names @@ -62,13 +64,14 @@ def f(): base.reset() n = 0 done = False + r = [0] * 3 while not done: n+= 1 - agents_view, reward, terminated, truncated, info = base.step([[0,0,0], [0,0,0]]) + agents_view, reward, terminated, truncated, info = base.step([r, r]) + print(terminated, truncated) done = np.logical_or(terminated, truncated).all() - metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) - print(n, done, terminated, np.logical_or(terminated, truncated).shape, metrics) - done = True + print(n, done) + #metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) base.close() print(done) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index cab649880..c23e40820 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -22,6 +22,7 @@ import jumanji import matrax from gigastep import ScenarioBuilder +import lbforaging from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment from jumanji.environments.routing.cleaner.generator import ( @@ -46,7 +47,9 @@ GigastepWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, + GymAgentIDWrapper, _multiagent_worker_shared_memory, + GymLBFWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -70,7 +73,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"rware": GymRwareWrapper} +_gym_registry = {"RobotWarehouse": GymRwareWrapper, "LevelBasedForaging" : GymLBFWrapper} def add_extra_wrappers( @@ -209,7 +212,7 @@ def make_gigastep_env( def make_gym_env( - config: DictConfig, num_env : int, add_global_state: bool = False, eval_env: bool = False + config: DictConfig, num_env : int, add_global_state: bool = False, ) -> Environment: # todo : create the appropriate annotation for the 
sync vector """ Create a Gym environment. @@ -222,23 +225,23 @@ def make_gym_env( Returns: A tuple of the environments. """ - base_env_name = config.env.scenario.split(":")[0] + base_env_name = config.env.env_name wrapper = _gym_registry[base_env_name] def create_gym_env( - config: DictConfig, add_global_state: bool = False, eval_env: bool = False + config: DictConfig, add_global_state: bool = False ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) - wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) + wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state) if not config.env.implicit_agent_id: - wrapped_env = AgentIDWrapper(wrapped_env) # todo : add agent id wrapper for gym . + wrapped_env = GymAgentIDWrapper(wrapped_env) # todo : add agent id wrapper for gym . wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env num_env = config.arch.num_envs envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names [ - lambda: create_gym_env(config, add_global_state, eval_env=eval_env) + lambda: create_gym_env(config, add_global_state) for _ in range(num_env) ], worker=_multiagent_worker_shared_memory diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 3608b1d10..64a5affec 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, _multiagent_worker_shared_memory +from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, GymLBFWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 546e05614..31146e29a 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -19,7 +19,7 @@ import numpy as np from numpy.typing import NDArray -from gym.spaces import Box +from gym import spaces from gym.vector.utils import write_to_shared_memory import sys @@ -51,7 +51,6 @@ def __init__( self._env = env #not having _env leaded tp self.env getting replaced --> circular called self.use_individual_rewards = use_individual_rewards self.add_global_state = add_global_state # todo : add the global observations - self.eval_env = eval_env self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[ 0 @@ -88,6 +87,66 @@ def get_actions_mask(self, info: Dict) -> NDArray: return np.ones((self.num_agents, self.num_actions), dtype=np.float32) +class GymLBFWrapper(gym.Wrapper): + """Wrapper for rware gym environments""" + + def __init__( + self, + env: gym.Env, + use_individual_rewards: bool = False, + add_global_state: bool = False, + ): + """Initialize the gym wrapper + + Args: + env (gym.env): gym env instance. + use_individual_rewards (bool, optional): Use individual or group rewards. + Defaults to False. + add_global_state (bool, optional) : Create global observations. Defaults to False. 
+ """ + super().__init__(env) + self._env = env #not having _env leaded tp self.env getting replaced --> circular called + self.use_individual_rewards = use_individual_rewards + self.add_global_state = add_global_state # todo : add the global observations + self.num_agents = len(self._env.action_space) + self.num_actions = self._env.action_space[ + 0 + ].n # todo: all the agents must have the same num_actions, add assertion? + + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple: + + if seed is not None: + self.env.seed(seed) + + agents_view, info = self._env.reset() + + info = {"actions_mask": self.get_actions_mask(info)} + + return np.array(agents_view), info + + def step(self, actions: NDArray) -> Tuple: #Vect auto rest + + agents_view, reward, terminated, truncated, info = self._env.step(actions) + + info = {"actions_mask": self.get_actions_mask(info)} + + if self.use_individual_rewards: + reward = np.array(reward) + else: + reward = np.array([np.array(reward).mean()] * self.num_agents) + + + truncated = [truncated] * self.num_agents + + return agents_view, reward, terminated, truncated, info + + def get_actions_mask(self, info: Dict) -> NDArray: + if "action_mask" in info: + return np.array(info["action_mask"]) + return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" @@ -102,14 +161,11 @@ def reset(self) -> Tuple: # Reset the env agents_view, info = self._env.reset() - # Handle the Done when the auto reset happens - done = self.running_count_episode_length != -1 # Avoid setting the first ever done to True - # Create the metrics dict metrics = { "episode_return": self.running_count_episode_return, "episode_length": self.running_count_episode_length, - "is_terminal_step": done, + "is_terminal_step": True, } # Reset the metrics @@ -140,24 +196,26 @@ def step(self, actions: NDArray) -> Tuple: metrics["won_episode"] = info["won_episode"] info["metrics"] = metrics - + return agents_view, reward, terminated, truncated, info -class AgentIDWrapper(gym.Wrapper): +class GymAgentIDWrapper(gym.Wrapper): """Add onehot agent IDs to observation.""" def __init__(self, env: gym.Env): super().__init__(env) self.agent_ids = np.eye(self.env.num_agents) + observation_space = self.env.observation_space[0] _obs_low, _obs_high, _obs_dtype, _obs_shape = ( - self.env.observation_space.low[0][0], - self.env.observation_space.high[0][0], - self.env.observation_space.dtype, - self.env.observation_space.shape, + observation_space.low[0], + observation_space.high[0], + observation_space.dtype, + observation_space.shape, ) - _new_obs_shape = (self.env.num_agents, _obs_shape[1] + self.env.num_agents) - self._observation_space = Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype) + _new_obs_shape = (_obs_shape[0] + self.env.num_agents,) + _observation_boxs = [spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype)] * self.env.num_agents + self.observation_space = spaces.Tuple(_observation_boxs) def reset(self) -> Tuple[np.ndarray, Dict]: """Reset the environment.""" From 8a872587571b88da959aaea86802645cde827bfc Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 4 Jul 2024 10:02:43 +0100 Subject: [PATCH 023/139] fix: batch size calc for multiple devices --- mava/systems/sebulba/ppo/ff_ippo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py 
b/mava/systems/sebulba/ppo/ff_ippo.py index 5df32bf5d..7ff158536 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -398,7 +398,7 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) # SHUFFLE MINIBATCHES - batch_size = config.system.rollout_length * config.arch.num_envs * len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor + batch_size = config.system.rollout_length * (config.arch.num_envs // len(config.arch.learner_device_ids)) * len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor permutation = jax.random.permutation(shuffle_key, batch_size) batch = (traj_batch, advantages, targets) batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) From 7f0acd9eb878a54f0c8a0af9c450d3543bebf911 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 5 Jul 2024 11:16:06 +0100 Subject: [PATCH 024/139] fix: num_updates and code refactoring --- mava/systems/sebulba/ppo/ff_ippo.py | 47 ++++++++++++----------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 7ff158536..8998de5f3 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -89,14 +89,6 @@ def get_action_and_value( value = critic_apply_fn(params.critic_params, observation).squeeze() return action, log_prob, value, key - @jax.jit - def prepare_data(storage: List[PPOTransition]) -> PPOTransition: - """Prepare data to share with learner.""" - return jax.tree_map( # type: ignore - lambda *xs : jnp.stack(xs), *storage - ) - - # Define queues to track time params_queue_get_time: deque = deque(maxlen=1) rollout_time: deque = deque(maxlen=1) @@ -109,12 +101,9 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: # Loop till the learner has finished training for update in range(config.system.num_updates): - print(update) - # Setup todo: double check tracking times inference_time: float = 0 storage_time: float = 0 env_send_time: float = 0 - setup = 0 # Get the latest parameters from the learner params_queue_get_time_start = time.time() @@ -131,9 +120,8 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: # Cached for transition cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) cached_next_dones = move_to_device(next_dones) - setup_start = time.time() cashed_action_mask = move_to_device(np.stack(info["actions_mask"]) ) - setup += time.time() - setup_start + # Increment current timestep t_env += ( config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs @@ -141,15 +129,14 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: # Get action and value inference_time_start = time.time() - # ( action, log_prob, value, key, ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) - inference_time += time.time() - inference_time_start + inference_time += time.time() - inference_time_start # Step the environment env_send_time_start = time.time() cpu_action = jax.device_get(action) @@ -161,7 +148,7 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: storage_time_start = time.time() # Prepare the data next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics (N_envs , N_metrics) -- > (N_metrics, N_envs) + metrics = 
jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics # Append data to storage storage.append( @@ -173,22 +160,23 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: log_prob=log_prob, obs=Observation(cached_next_obs, cashed_action_mask), info=metrics, - )#todo: use a threadsafe alt https://github.com/instadeepai/CityLearn/blob/27e69f8ebdf1789c55ffab5c326bfaa50733a5e7/power_systems/sax_sebulba.py#L39 + ) ) storage_time += time.time() - storage_time_start rollout_time.append(time.time() - rollout_time_start) parse_timer = time.time() + # Prepare data to share with learner - # todo: investigate te thread --> single learning - partitioned_storage = prepare_data(storage) + stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) + #sorage has shape rollout_len, num_agents, num_envs, .... while the other vectors have num_agents, num_envs, ... -> their split axis is diffrent shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) - sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , partitioned_storage) + sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) - sharded_next_action_mask = shard_split_payload(jnp.stack([*info["actions_mask"]], axis = 0), 0) + sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) sharded_next_done = shard_split_payload(next_dones, 0) @@ -200,7 +188,6 @@ def prepare_data(storage: List[PPOTransition]) -> PPOTransition: "env_step_time": env_send_time, "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, "parse_time" : time.time() - parse_timer, - "setup_time" : setup, } #print(speed_info) @@ -581,13 +568,14 @@ def run_experiment(_config: DictConfig) -> float: evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config) #todo: make this more generic # Calculate total timesteps. - config = sebulba_check_total_timesteps(config) #todo: update this for sebulba + config = sebulba_check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." # Calculate number of updates per evaluation. - config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation) + config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation if the num_updates is not a multiple of num_evaluation steps_per_rollout = ( len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor @@ -638,8 +626,9 @@ def run_experiment(_config: DictConfig) -> float: learner_devices, d_id, ), - ).start() #todo : this is techinically only multu threaded not multi processepr? - + ).start() #todo : Use a process insted of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) + + # Run experiment for the total number of updates. 
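# Worked example of the update/evaluation schedule computed with divmod above,
# using illustrative numbers only (num_updates=1000, num_evaluation=64).
num_updates, num_evaluation = 1000, 64
num_updates_per_eval, remaining_updates = divmod(num_updates, num_evaluation)  # 15, 40
num_evaluation += remaining_updates != 0  # one extra eval step consumes the remainder
assert (num_evaluation - 1) * num_updates_per_eval + remaining_updates == num_updates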
max_episode_return = jnp.float32(0.0) best_params = None @@ -651,7 +640,9 @@ def run_experiment(_config: DictConfig) -> float: episode_metrics = [] train_metrics = [] - for update in range(config.system.num_updates_per_eval): + # Make sure that the + num_updates_in_eval = config.system.num_updates_per_eva if eval_step != config.arch.num_evaluation - 1 else remaining_updates + for update in range(num_updates_in_eval): sharded_storages = [] sharded_next_obss = [] sharded_next_dones = [] @@ -679,7 +670,7 @@ def run_experiment(_config: DictConfig) -> float: # Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) #todo: check if this breaks the explicet array device placment + sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) sharded_next_obss = jnp.concatenate(sharded_next_obss, axis = 1) sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) sharded_next_action_masks = jnp.concatenate(sharded_next_action_masks, axis = 1) From 3e352cffc37db558ec4e324a4afe6e56dd6fa1c8 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 8 Jul 2024 11:41:15 +0100 Subject: [PATCH 025/139] chore : code cleanup + comments + added checkpoint save --- mava/systems/sebulba/ppo/ff_ippo.py | 71 ++++++++++++----------------- mava/systems/sebulba/ppo/types.py | 1 + 2 files changed, 31 insertions(+), 41 deletions(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 8998de5f3..f2168cf63 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -32,7 +32,7 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_sebulba_eval_fns as make_eval_fns #todo: make a standered eval function +from mava.evaluator import make_sebulba_eval_fns as make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.sebulba.ppo.types import LearnerState, OptStates, Params, PPOTransition, Observation #todo: change this Observation to use the origial one @@ -55,21 +55,13 @@ def rollout( config: DictConfig, rollout_queue: queue.Queue, params_queue: queue.Queue, - device_thread_id: int, apply_fns: Tuple, - logger: MavaLogger, learner_devices: List, actor_device_id : int): - - #create envs - env = environments.make_gym_env(config, config.arch.num_envs) - + #setup - len_executor_device_ids = len(config.arch.executor_device_ids) + env = environments.make_gym_env(config, config.arch.num_envs) current_actor_device = jax.devices()[actor_device_id] - t_env = 0 - - actor_apply_fn, critic_apply_fn = apply_fns # Define the util functions: select action function and prepare data to share it with learner. @@ -94,7 +86,7 @@ def get_action_and_value( rollout_time: deque = deque(maxlen=1) rollout_queue_put_time: deque = deque(maxlen=1) - next_obs , info = env.reset() #todo : the first info is discarded , is that a problem? 
+ next_obs , info = env.reset() next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) move_to_device = lambda x : jax.device_put(x, device = current_actor_device) @@ -118,14 +110,9 @@ def get_action_and_value( for _ in range(0, config.system.rollout_length): # Cached for transition - cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) - cached_next_dones = move_to_device(next_dones) - cashed_action_mask = move_to_device(np.stack(info["actions_mask"]) ) - - # Increment current timestep - t_env += ( - config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs - ) + cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) # (num_envs, num_agents, ...) + cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) + cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) # Get action and value inference_time_start = time.time() @@ -136,17 +123,16 @@ def get_action_and_value( key, ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) - inference_time += time.time() - inference_time_start + # Step the environment + inference_time += time.time() - inference_time_start env_send_time_start = time.time() cpu_action = jax.device_get(action) - - next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) #num_env, num_agents --> num_agents, num_env + next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) # (num_env, num_agents) --> (num_agents, num_env) env_send_time += time.time() - env_send_time_start - - storage_time_start = time.time() # Prepare the data + storage_time_start = time.time() next_dones = np.logical_or(terminated, truncated) metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics @@ -168,18 +154,21 @@ def get_action_and_value( parse_timer = time.time() # Prepare data to share with learner - stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) + #[PPOTransition() * rollout_len] --> PPOTransition[done = (rollout_len, num_envs, num_agents), action = (rollout_len, num_envs, num_agents, num_actions), ...] + stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) - #sorage has shape rollout_len, num_agents, num_envs, .... while the other vectors have num_agents, num_envs, ... -> their split axis is diffrent + + # Split the arrays over the different learner_devices on the num_envs axis shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) - sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) + sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) - sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) + # (num_learner_devices, num_envs, num_agents, ...) 
+ sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) sharded_next_done = shard_split_payload(next_dones, 0) - + # For debugging speed_info = { "rollout_time": np.mean(rollout_time), "params_queue_get_time": np.mean(params_queue_get_time), @@ -192,7 +181,6 @@ def get_action_and_value( #print(speed_info) payload = ( - t_env, sharded_storage, sharded_next_obs, sharded_next_done, @@ -447,8 +435,6 @@ def learner_setup( keys: chex.Array, config: DictConfig, learner_devices: List ) -> Tuple[LearnerFn[LearnerState], Actor, LearnerState]: """Initialise learner_fn, network, optimiser, environment and states.""" - # Get available TPU cores. - n_devices = len(learner_devices) #create temporory envoirnments. env = environments.make_gym_env(config, config.arch.num_envs) @@ -502,7 +488,7 @@ def learner_setup( apply_fns = (actor_network.apply, critic_network.apply) update_fns = (actor_optim.update, critic_optim.update) - # Get batched iterated update and replicate it to pmap it over cores. + # Get batched iterated update and replicate it to pmap it over learner cores. learn = get_learner_fn(apply_fns, update_fns, config) learn = jax.pmap(learn, axis_name="device", devices = learner_devices) @@ -575,7 +561,7 @@ def run_experiment(_config: DictConfig) -> float: # Calculate number of updates per evaluation. config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation) - config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation if the num_updates is not a multiple of num_evaluation + config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation step if the num_updates is not a multiple of num_evaluation steps_per_rollout = ( len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor @@ -620,13 +606,11 @@ def run_experiment(_config: DictConfig) -> float: config, rollout_queues[-1], params_queues[-1], - d_idx * config.arch.n_threads_per_executor + thread_id, apply_fns, - logger, learner_devices, d_id, ), - ).start() #todo : Use a process insted of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) + ).start() #todo : Use a process instead of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) # Run experiment for the total number of updates. 
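# Minimal, self-contained sketch of the shard_split_payload pattern above: split an
# array along the env axis and give one shard to each learner device. Shapes and the
# single-device setup are illustrative only.
import jax
import jax.numpy as jnp

learner_devices = jax.local_devices()[:1]
x = jnp.ones((8, 4, 5))  # (num_envs, num_agents, obs_dim)
shards = jnp.split(x, len(learner_devices), axis=0)
sharded_x = jax.device_put_sharded(shards, devices=learner_devices)
# A new leading device axis is added, with shard i resident on learner_devices[i],
# which is the layout the pmapped learner expects.
assert sharded_x.shape == (len(learner_devices), 8 // len(learner_devices), 4, 5)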
@@ -641,7 +625,7 @@ def run_experiment(_config: DictConfig) -> float: train_metrics = [] # Make sure that the - num_updates_in_eval = config.system.num_updates_per_eva if eval_step != config.arch.num_evaluation - 1 else remaining_updates + num_updates_in_eval = config.system.num_updates_per_eval if eval_step != config.arch.num_evaluation - 1 else remaining_updates for update in range(num_updates_in_eval): sharded_storages = [] sharded_next_obss = [] @@ -655,7 +639,6 @@ def run_experiment(_config: DictConfig) -> float: for thread_id in range(config.arch.n_threads_per_executor): # Get data from rollout queue ( - t_env, sharded_storage, sharded_next_obs, sharded_next_done, @@ -723,7 +706,13 @@ def run_experiment(_config: DictConfig) -> float: episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) - #todo: add saving + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), + episode_return=episode_return, + ) if config.arch.absolute_metric and max_episode_return <= episode_return: best_params = copy.deepcopy(learner_output.learner_state.params) diff --git a/mava/systems/sebulba/ppo/types.py b/mava/systems/sebulba/ppo/types.py index 6e02aa904..c27dcace5 100644 --- a/mava/systems/sebulba/ppo/types.py +++ b/mava/systems/sebulba/ppo/types.py @@ -88,6 +88,7 @@ class RNNPPOTransition(NamedTuple): log_prob: chex.Array obs: chex.Array hstates: HiddenStates + info: Dict class Observation(NamedTuple): From bcdaa381096b8c843127b051020af8c99d139c52 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 8 Jul 2024 14:53:56 +0100 Subject: [PATCH 026/139] feat: mappo + removed sebulba specifique types and made the rware wrapper generic --- mava/evaluator.py | 8 +- mava/systems/sebulba/ppo/ff_ippo.py | 28 +- mava/systems/sebulba/ppo/ff_mappo.py | 768 +++++++++++++++++++++++++++ mava/types.py | 6 +- mava/utils/make_env.py | 6 +- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 80 +-- 7 files changed, 807 insertions(+), 91 deletions(-) create mode 100644 mava/systems/sebulba/ppo/ff_mappo.py diff --git a/mava/evaluator.py b/mava/evaluator.py index 066890ed9..f44a8d55b 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -31,7 +31,8 @@ RNNEvalState, ) -from mava.systems.sebulba.ppo.types import Observation +from mava.types import Observation + import numpy as np def get_anakin_ff_evaluator_fn( @@ -383,7 +384,7 @@ def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: key, policy_key = jax.random.split(key) obs = jax.device_put(jnp.stack(obs, axis = 1)) - action_mask = jax.device_put(jnp.stack([*info["actions_mask"]], axis = 0)) + action_mask = jax.device_put(np.stack(info["actions_mask"]) ) actions = get_action(params, Observation(obs, action_mask), policy_key) cpu_action = jax.device_get(actions) @@ -409,6 +410,7 @@ def make_sebulba_eval_fns( eval_env_fn: callable, network_apply_fn: Union[ActorApply, RecActorApply], config: DictConfig, + add_global_state : bool = False, use_recurrent_net: bool = False, scanned_rnn: Optional[nn.Module] = None, ) -> Tuple[EvalFn, EvalFn]: @@ -429,7 +431,7 @@ def make_sebulba_eval_fns( Raises: AssertionError: If `use_recurrent_net` is True but `scanned_rnn` is not provided. 
""" - eval_env, absolute_eval_env = eval_env_fn(config, config.arch.num_eval_episodes), eval_env_fn(config, config.arch.num_eval_episodes * 10) + eval_env, absolute_eval_env = eval_env_fn(config, config.arch.num_eval_episodes, add_global_state = add_global_state), eval_env_fn(config, config.arch.num_eval_episodes * 10, add_global_state = add_global_state) # Check if win rate is required for evaluation. log_win_rate = config.env.log_win_rate diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index f2168cf63..30e5bacbf 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -35,8 +35,8 @@ from mava.evaluator import make_sebulba_eval_fns as make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.sebulba.ppo.types import LearnerState, OptStates, Params, PPOTransition, Observation #todo: change this Observation to use the origial one -from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import ( @@ -167,6 +167,9 @@ def get_action_and_value( sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) sharded_next_done = shard_split_payload(next_dones, 0) + + # Pack the obs and action mask + payload_obs = Observation(sharded_next_obs, sharded_next_action_mask) # For debugging speed_info = { @@ -182,9 +185,8 @@ def get_action_and_value( payload = ( sharded_storage, - sharded_next_obs, + payload_obs, sharded_next_done, - sharded_next_action_mask ) # Put data in the rollout queue to share it with the learner @@ -204,7 +206,7 @@ def get_learner_fn( actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: + def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: """A single update of the network. This function steps the environment and records the trajectory batch for @@ -246,7 +248,7 @@ def _get_advantages( # CALCULATE ADVANTAGE params, opt_states, key, _, _ = learner_state - last_val = critic_apply_fn(params.critic_params, Observation(last_obs, last_action_mask)) + last_val = critic_apply_fn(params.critic_params, last_obs) advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) def _update_epoch(update_state: Tuple, _: Any) -> Tuple: @@ -403,7 +405,7 @@ def _critic_loss_fn( metric = traj_batch.info return learner_state, (metric, loss_info) - def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_action_mask : chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: + def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: """Learner function. 
This function represents the learner, it updates the network parameters @@ -420,7 +422,7 @@ def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs """ - learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_action_mask, last_dones) + learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones) return ExperimentOutput( learner_state=learner_state, @@ -630,7 +632,6 @@ def run_experiment(_config: DictConfig) -> float: sharded_storages = [] sharded_next_obss = [] sharded_next_dones = [] - sharded_next_action_masks = [] rollout_start_time = time.time() # Loop through each executor device @@ -642,25 +643,22 @@ def run_experiment(_config: DictConfig) -> float: sharded_storage, sharded_next_obs, sharded_next_done, - sharded_next_action_mask ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() sharded_storages.append(sharded_storage) sharded_next_obss.append(sharded_next_obs) sharded_next_dones.append(sharded_next_done) - sharded_next_action_masks.append(sharded_next_action_mask) - + rollout_times.append(time.time() - rollout_start_time) # Concatinate the returned trajectories on the n_env axis sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) - sharded_next_obss = jnp.concatenate(sharded_next_obss, axis = 1) + sharded_next_obss = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_obss) sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) - sharded_next_action_masks = jnp.concatenate(sharded_next_action_masks, axis = 1) learner_start_time = time.time() - learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_action_masks, sharded_next_dones) + learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones) learner_speeds.append(time.time() - learner_start_time) # Stack the metrics diff --git a/mava/systems/sebulba/ppo/ff_mappo.py b/mava/systems/sebulba/ppo/ff_mappo.py new file mode 100644 index 000000000..5f84fd0d0 --- /dev/null +++ b/mava/systems/sebulba/ppo/ff_mappo.py @@ -0,0 +1,768 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
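# Illustrative, standard-library-only sketch of the actor/learner handshake used by
# the Sebulba systems in this patch: each executor thread reads fresh parameters from
# a params_queue and pushes trajectories into a rollout_queue, both bounded to one
# element so neither side runs ahead of the other. The rollout payload here is a
# placeholder string rather than real trajectories.
import queue
import threading

def actor_thread(params_queue: queue.Queue, rollout_queue: queue.Queue, num_updates: int) -> None:
    for _ in range(num_updates):
        params = params_queue.get()  # block until the learner publishes new params
        rollout_queue.put(f"rollout collected with {params}")  # block until consumed

params_q, rollout_q = queue.Queue(maxsize=1), queue.Queue(maxsize=1)
threading.Thread(target=actor_thread, args=(params_q, rollout_q, 3), daemon=True).start()
for update in range(3):  # learner loop: publish params, wait for the next rollout
    params_q.put(f"params_v{update}")
    print(rollout_q.get())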
+ +import copy +import time +from typing import Any, Dict, Tuple, List +import threading +import chex +import flax +import hydra +import jax +import jax.debug +import jax.numpy as jnp +import numpy as np +import optax +import queue +from collections import deque +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict +from omegaconf import DictConfig, OmegaConf +from optax._src.base import OptState +from rich.pretty import pprint + +from mava.evaluator import make_sebulba_eval_fns as make_eval_fns +from mava.networks import FeedForwardActor as Actor +from mava.networks import FeedForwardValueNet as Critic +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this Observation to use the standard obs +from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, ObservationGlobalState +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + merge_leading_dims, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.total_timestep_checker import sebulba_check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +def rollout( + key: chex.PRNGKey, + config: DictConfig, + rollout_queue: queue.Queue, + params_queue: queue.Queue, + apply_fns: Tuple, + learner_devices: List, + actor_device_id : int): + + #setup + env = environments.make_gym_env(config, config.arch.num_envs, add_global_state=True) + current_actor_device = jax.devices()[actor_device_id] + actor_apply_fn, critic_apply_fn = apply_fns + + # Define the util functions: select action function and prepare data to share it with learner. + @jax.jit + def get_action_and_value( + params: FrozenDict, + observation: ObservationGlobalState, + key: chex.PRNGKey, + ) -> Tuple: + """Get action and value.""" + key, subkey = jax.random.split(key) + + actor_policy = actor_apply_fn(params.actor_params, observation) + action = actor_policy.sample(seed=subkey) + log_prob = actor_policy.log_prob(action) + + value = critic_apply_fn(params.critic_params, observation).squeeze() + return action, log_prob, value, key + + # Define queues to track time + params_queue_get_time: deque = deque(maxlen=1) + rollout_time: deque = deque(maxlen=1) + rollout_queue_put_time: deque = deque(maxlen=1) + + next_obs , info = env.reset() + next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) + + move_to_device = lambda x : jax.device_put(x, device = current_actor_device) + + # Loop till the learner has finished training + for update in range(config.system.num_updates): + inference_time: float = 0 + storage_time: float = 0 + env_send_time: float = 0 + + # Get the latest parameters from the learner + params_queue_get_time_start = time.time() + params = params_queue.get() + params_queue_get_time.append(time.time() - params_queue_get_time_start) + + # Rollout + rollout_time_start = time.time() + storage: List = [] + + # Loop over the rollout length + for _ in range(0, config.system.rollout_length): + + # Cached for transition + cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) # (num_envs, num_agents, ...) 
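# Shape sketch for the stacking used in this rollout (shapes assumed from the gym
# wrappers in this patch): the vector env returns one array per agent, each of shape
# (num_envs, obs_dim); stacking on axis 1 gives the (num_envs, num_agents, ...) layout
# the networks expect, and actions are swapped back before env.step.
import numpy as np

num_envs, num_agents, obs_dim = 4, 3, 5
per_agent_obs = tuple(np.zeros((num_envs, obs_dim)) for _ in range(num_agents))
assert np.stack(per_agent_obs, axis=1).shape == (num_envs, num_agents, obs_dim)
actions = np.zeros((num_envs, num_agents), dtype=int)
assert actions.swapaxes(0, 1).shape == (num_agents, num_envs)  # layout env.step takes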
+ cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) + cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) + cached_next_global_obs = move_to_device(np.stack(info["global_obs"])) + + + # Get action and value + full_observation = ObservationGlobalState(cached_next_obs, cashed_action_mask, cached_next_global_obs) + inference_time_start = time.time() + ( + action, + log_prob, + value, + key, + ) = get_action_and_value(params, full_observation , key) + + + # Step the environment + inference_time += time.time() - inference_time_start + env_send_time_start = time.time() + cpu_action = jax.device_get(action) + next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) # (num_env, num_agents) --> (num_agents, num_env) + env_send_time += time.time() - env_send_time_start + + # Prepare the data + storage_time_start = time.time() + next_dones = np.logical_or(terminated, truncated) + metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics + + # Append data to storage + storage.append( + PPOTransition( + done=cached_next_dones, + action=action, + value=value, + reward=next_reward, + log_prob=log_prob, + obs=full_observation, + info=metrics, + ) + ) + storage_time += time.time() - storage_time_start + rollout_time.append(time.time() - rollout_time_start) + + parse_timer = time.time() + + # Prepare data to share with learner + #[PPOTransition() * rollout_len] --> PPOTransition[done = (rollout_len, num_envs, num_agents), action = (rollout_len, num_envs, num_agents, num_actions), ...] + stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) + + + # Split the arrays over the different learner_devices on the num_envs axis + shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) + + sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) + + # (num_learner_devices, num_envs, num_agents, ...) 
+ sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) + sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) + sharded_next_global_obs = shard_split_payload(np.stack(info["global_obs"]), 0) + sharded_next_done = shard_split_payload(next_dones, 0) + + # Pack the obs and action mask + payload_obs = ObservationGlobalState(sharded_next_obs, sharded_next_action_mask, sharded_next_global_obs) + + # For debugging + speed_info = { + "rollout_time": np.mean(rollout_time), + "params_queue_get_time": np.mean(params_queue_get_time), + "action_inference": inference_time, + "storage_time": storage_time, + "env_step_time": env_send_time, + "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, + "parse_time" : time.time() - parse_timer, + } + #print(speed_info) + + payload = ( + sharded_storage, + payload_obs, + sharded_next_done, + ) + + # Put data in the rollout queue to share it with the learner + rollout_queue_put_time_start = time.time() + rollout_queue.put(payload) + rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) + + +def get_learner_fn( + apply_fns: Tuple[ActorApply, CriticApply], + update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], + config: DictConfig, +) -> LearnerFn[LearnerState]: + """Get the learner function.""" + + # Get apply and update functions for actor and critic networks. + actor_apply_fn, critic_apply_fn = apply_fns + actor_update_fn, critic_update_fn = update_fns + + def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: + """A single update of the network. + + This function steps the environment and records the trajectory batch for + training. It then calculates advantages and targets based on the recorded + trajectory and updates the actor and critic networks based on the calculated + losses. + + Args: + learner_state (NamedTuple): + - params (Params): The current model parameters. + - opt_states (OptStates): The current optimizer states. + - key (PRNGKey): The random number generator state. + - env_state (State): The environment state. + - last_timestep (TimeStep): The last timestep in the current trajectory. + _ (Any): The current metrics info. 
+ """ + + def _calculate_gae( #todo: lake sure this is appropriate + traj_batch: PPOTransition, last_val: chex.Array, last_done: chex.Array + ) -> Tuple[chex.Array, chex.Array]: + def _get_advantages( + carry: Tuple[chex.Array, chex.Array, chex.Array], transition: PPOTransition + ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: + gae, next_value, next_done = carry + done, value, reward = transition.done, transition.value, transition.reward + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - next_done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae + return (gae, value, done), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(last_val), last_val, last_done), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + # CALCULATE ADVANTAGE + params, opt_states, key, _, _ = learner_state + last_val = critic_apply_fn(params.critic_params, last_obs) + advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_states, key = train_state + traj_batch, advantages, targets = batch_info + + def _actor_loss_fn( + actor_params: FrozenDict, + actor_opt_state: OptState, + traj_batch: PPOTransition, + gae: chex.Array, + key: chex.PRNGKey, + ) -> Tuple: + """Calculate the actor loss.""" + # RERUN NETWORK + actor_policy = actor_apply_fn(actor_params, traj_batch.obs) + log_prob = actor_policy.log_prob(traj_batch.action) + + # CALCULATE ACTOR LOSS + ratio = jnp.exp(log_prob - traj_batch.log_prob) + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + loss_actor = loss_actor.mean() + # The seed will be used in the TanhTransformedDistribution: + entropy = actor_policy.entropy(seed=key).mean() + + total_loss_actor = loss_actor - config.system.ent_coef * entropy + return total_loss_actor, (loss_actor, entropy) + + def _critic_loss_fn( + critic_params: FrozenDict, + critic_opt_state: OptState, + traj_batch: PPOTransition, + targets: chex.Array, + ) -> Tuple: + """Calculate the critic loss.""" + # RERUN NETWORK + value = critic_apply_fn(critic_params, traj_batch.obs) + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + -config.system.clip_eps, config.system.clip_eps + ) + value_losses = jnp.square(value - targets) + value_losses_clipped = jnp.square(value_pred_clipped - targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + critic_total_loss = config.system.vf_coef * value_loss + return critic_total_loss, (value_loss) + + # CALCULATE ACTOR LOSS + key, entropy_key = jax.random.split(key) + actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) + actor_loss_info, actor_grads = actor_grad_fn( + params.actor_params, + opt_states.actor_opt_state, + traj_batch, + advantages, + entropy_key, + ) + + # CALCULATE CRITIC LOSS + critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) + critic_loss_info, critic_grads = critic_grad_fn( + params.critic_params, opt_states.critic_opt_state, traj_batch, 
targets + ) + + # Compute the parallel mean (pmean) over the batch. + # This calculation is inspired by the Anakin architecture demo notebook. + # available at https://tinyurl.com/26tdzs5x + # pmean over devices. + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="device" #todo: pmean over learner devices not all + ) + + # pmean over devices. + critic_grads, critic_loss_info = jax.lax.pmean( + (critic_grads, critic_loss_info), axis_name="device" + ) + + # UPDATE ACTOR PARAMS AND OPTIMISER STATE + actor_updates, actor_new_opt_state = actor_update_fn( + actor_grads, opt_states.actor_opt_state + ) + actor_new_params = optax.apply_updates(params.actor_params, actor_updates) + + # UPDATE CRITIC PARAMS AND OPTIMISER STATE + critic_updates, critic_new_opt_state = critic_update_fn( + critic_grads, opt_states.critic_opt_state + ) + critic_new_params = optax.apply_updates(params.critic_params, critic_updates) + + # PACK NEW PARAMS AND OPTIMISER STATE + new_params = Params(actor_new_params, critic_new_params) + new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) + # PACK LOSS INFO + total_loss = actor_loss_info[0] + critic_loss_info[0] + value_loss = critic_loss_info[1] + actor_loss = actor_loss_info[1][0] + entropy = actor_loss_info[1][1] + loss_info = { + "total_loss": total_loss, + "value_loss": value_loss, + "actor_loss": actor_loss, + "entropy": entropy, + } + return (new_params, new_opt_state, entropy_key), loss_info + + params, opt_states, traj_batch, advantages, targets, key = update_state + key, shuffle_key, entropy_key = jax.random.split(key, 3) + # SHUFFLE MINIBATCHES + batch_size = config.system.rollout_length * (config.arch.num_envs // len(config.arch.learner_device_ids)) * len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor + permutation = jax.random.permutation(shuffle_key, batch_size) + batch = (traj_batch, advantages, targets) + batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) + shuffled_batch = jax.tree_util.tree_map( + lambda x: jnp.take(x, permutation, axis=0), batch + ) + minibatches = jax.tree_util.tree_map( + lambda x: jnp.reshape(x, [config.system.num_minibatches, -1] + list(x.shape[1:])), + shuffled_batch, + ) + # UPDATE MINIBATCHES + (params, opt_states, entropy_key), loss_info = jax.lax.scan( + _update_minibatch, (params, opt_states, entropy_key), minibatches + ) + + update_state = (params, opt_states, traj_batch, advantages, targets, key) + return update_state, loss_info + + update_state = (params, opt_states, traj_batch, advantages, targets, key) + # UPDATE EPOCHS + update_state, loss_info = jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_states, traj_batch, advantages, targets, key = update_state + learner_state = LearnerState(params, opt_states, key, None, None) + metric = traj_batch.info + return learner_state, (metric, loss_info) + + def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: + """Learner function. + + This function represents the learner, it updates the network parameters + by iteratively applying the `_update_step` function for a fixed number of + updates. The `_update_step` function is vectorized over a batch of inputs. + + Args: + learner_state (NamedTuple): + - params (Params): The initial model parameters. + - opt_states (OptStates): The initial optimizer state. 
+ - key (chex.PRNGKey): The random number generator state. + - env_state (LogEnvState): The environment state. + - timesteps (TimeStep): The initial timestep in the initial trajectory. + """ + + + learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones) + + return ExperimentOutput( + learner_state=learner_state, + episode_metrics=episode_info, + train_metrics=loss_info, + ) + + return learner_fn + + +def learner_setup( + keys: chex.Array, config: DictConfig, learner_devices: List +) -> Tuple[LearnerFn[LearnerState], Actor, LearnerState]: + """Initialise learner_fn, network, optimiser, environment and states.""" + + #create temporory envoirnments. + env = environments.make_gym_env(config, 1, add_global_state=True) + # Get number of agents and actions. + action_space = env.single_action_space + config.system.num_agents = len(action_space) + config.system.num_actions = action_space[0].n + + # PRNG keys. + key, actor_net_key, critic_net_key = keys + + # Define network and optimiser. + actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) + actor_action_head = hydra.utils.instantiate( + config.network.action_head, action_dim=config.system.num_actions + ) + critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) + + actor_network = Actor(torso=actor_torso, action_head=actor_action_head) + critic_network = Critic(torso=critic_torso, centralised_critic= True) + + actor_lr = make_learning_rate(config.system.actor_lr, config) + critic_lr = make_learning_rate(config.system.critic_lr, config) + + actor_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(actor_lr, eps=1e-5), + ) + critic_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(critic_lr, eps=1e-5), + ) + + # Initialise observation: Select only obs for a single agent. + obs, info = env.reset() + init_obs = jnp.stack(obs, axis = 1) # (num_envs, num_agents, ...) + init_mask = np.stack(info["actions_mask"]) # (num_envs, num_agents, num_actions) + init_global_obs = np.stack(info["global_obs"]) + init_x = ObservationGlobalState(init_obs, init_mask, init_global_obs) + + # Initialise actor params and optimiser state. + actor_params = actor_network.init(actor_net_key, init_x) + actor_opt_state = actor_optim.init(actor_params) + + # Initialise critic params and optimiser state. + critic_params = critic_network.init(critic_net_key, init_x) + critic_opt_state = critic_optim.init(critic_params) + + # Pack params. + params = Params(actor_params, critic_params) + + # Pack apply and update functions. + apply_fns = (actor_network.apply, critic_network.apply) + update_fns = (actor_optim.update, critic_optim.update) + + # Get batched iterated update and replicate it to pmap it over learner cores. + learn = get_learner_fn(apply_fns, update_fns, config) + learn = jax.pmap(learn, axis_name="device", devices = learner_devices) + + # Load model from checkpoint if specified. + if config.logger.checkpointing.load_model: + loaded_checkpoint = Checkpointer( + model_name=config.logger.system_name, + **config.logger.checkpointing.load_args, # Other checkpoint args + ) + # Restore the learner state from the checkpoint + restored_params, _ = loaded_checkpoint.restore_params(input_params=params) + # Update the params + params = restored_params + + # Define params to be replicated across devices and batches. 
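# Small sketch of the replication step described above: flax.jax_utils.replicate adds
# a leading device axis so the pmapped learner sees one parameter copy per device;
# unreplicate drops it again. Shapes and the single-device setup are illustrative.
import jax
import jax.numpy as jnp
import flax.jax_utils

learner_devices = jax.local_devices()[:1]
params = {"w": jnp.zeros((3, 2))}
replicated = flax.jax_utils.replicate(params, devices=learner_devices)
assert replicated["w"].shape == (len(learner_devices), 3, 2)
assert flax.jax_utils.unreplicate(replicated)["w"].shape == (3, 2)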
+ key, step_keys = jax.random.split(key) + opt_states = OptStates(actor_opt_state, critic_opt_state) + replicate_learner = (params, opt_states, step_keys) + + # Duplicate learner across Learner devices. + replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices) + + # Initialise learner state. + params, opt_states, step_keys = replicate_learner + init_learner_state = LearnerState(params, opt_states, step_keys, None, None) + env.close() + + return learn, apply_fns, init_learner_state + + +def run_experiment(_config: DictConfig) -> float: + """Runs experiment.""" + config = copy.deepcopy(_config) + + devices = jax.devices() + learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] + + # PRNG keys. + key, key_e, actor_net_key, critic_net_key = jax.random.split( + jax.random.PRNGKey(config.system.seed), num=4 + ) + + # Sanity check of config + assert ( + config.arch.num_envs % len(config.arch.learner_device_ids) == 0 + ), "The number of environments must to be divisible by the number of learners " + + assert ( + int(config.arch.num_envs / len(config.arch.learner_device_ids)) + * config.arch.n_threads_per_executor + % config.system.num_minibatches + == 0 + ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" + + + # Setup learner. + learn, apply_fns , learner_state = learner_setup( + (key ,actor_net_key, critic_net_key), config, learner_devices + ) + + # Setup evaluator. + # One key per device for evaluation. + evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config, add_global_state=True) #todo: make this more generic + + # Calculate total timesteps. + config = sebulba_check_total_timesteps(config) + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." + + # Calculate number of updates per evaluation. + config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation) + config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation step if the num_updates is not a multiple of num_evaluation + steps_per_rollout = ( + len(config.arch.executor_device_ids) + * config.arch.n_threads_per_executor + * config.system.rollout_length + * config.arch.num_envs + * config.system.num_updates_per_eval + ) + + # Logger setup + logger = MavaLogger(config) + cfg: Dict = OmegaConf.to_container(config, resolve=True) + cfg["arch"]["devices"] = jax.devices() + pprint(cfg) + + # Set up checkpointer + save_checkpoint = config.logger.checkpointing.save_model + if save_checkpoint: + checkpointer = Checkpointer( + metadata=config, # Save all config as metadata in the checkpoint + model_name=config.logger.system_name, + **config.logger.checkpointing.save_args, # Checkpoint args + ) + + # Executor setup and launch. 
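# Arithmetic behind the two assertions above, with illustrative numbers only: each
# learner device takes an equal slice of the envs, and the per-device batch (as
# computed in the earlier batch-size fix) must split evenly into minibatches.
rollout_length, num_envs = 128, 16
n_learner_devices, n_executor_devices, n_threads_per_executor = 2, 1, 1
num_minibatches = 2

envs_per_learner = num_envs // n_learner_devices  # 8 envs per learner device
batch_size = rollout_length * envs_per_learner * n_executor_devices * n_threads_per_executor
assert num_envs % n_learner_devices == 0
assert batch_size % num_minibatches == 0  # 1024 samples -> 512 per minibatch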
+ unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) + params_queues: List = [] + rollout_queues: List = [] + for d_idx, d_id in enumerate( # Loop through each executor device + config.arch.executor_device_ids + ): + # Replicate params per executor device + device_params = jax.device_put(unreplicated_params, devices[d_id]) + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + params_queues.append(queue.Queue(maxsize=1)) + rollout_queues.append(queue.Queue(maxsize=1)) + params_queues[-1].put(device_params) + threading.Thread( + target=rollout, + args=( + jax.device_put(key, devices[d_id]), + config, + rollout_queues[-1], + params_queues[-1], + apply_fns, + learner_devices, + d_id, + ), + ).start() #todo : Use a process instead of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) + + + # Run experiment for the total number of updates. + max_episode_return = jnp.float32(0.0) + best_params = None + for eval_step in range(config.arch.num_evaluation): + training_start_time = time.time() + learner_speeds = [] + rollout_times = [] + + episode_metrics = [] + train_metrics = [] + + # Make sure that the + num_updates_in_eval = config.system.num_updates_per_eval if eval_step != config.arch.num_evaluation - 1 else remaining_updates + for update in range(num_updates_in_eval): + sharded_storages = [] + sharded_next_obss = [] + sharded_next_dones = [] + + rollout_start_time = time.time() + # Loop through each executor device + for d_idx, _ in enumerate(config.arch.executor_device_ids): + # Loop through each executor thread + for thread_id in range(config.arch.n_threads_per_executor): + # Get data from rollout queue + ( + sharded_storage, + sharded_next_obs, + sharded_next_done, + ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() + sharded_storages.append(sharded_storage) + sharded_next_obss.append(sharded_next_obs) + sharded_next_dones.append(sharded_next_done) + + rollout_times.append(time.time() - rollout_start_time) + + + # Concatinate the returned trajectories on the n_env axis + sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) + sharded_next_obss = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_obss) + sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) + + + learner_start_time = time.time() + learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones) + learner_speeds.append(time.time() - learner_start_time) + + # Stack the metrics + episode_metrics.append(learner_output.episode_metrics) + train_metrics.append(learner_output.train_metrics) + + # Send updated params to executors + unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) + for d_idx, d_id in enumerate(config.arch.executor_device_ids): + device_params = jax.device_put(unreplicated_params, devices[d_id]) + for thread_id in range(config.arch.n_threads_per_executor): + params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( + device_params + ) + + + + # Log the results of the training. 
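# Sketch of the tree-wise merge above: payloads from every executor thread share one
# pytree structure, so tree_map with jnp.concatenate joins them along the env axis
# (axis 2, after the leading device and rollout axes). Shapes are illustrative.
import jax
import jax.numpy as jnp

payload_a = {"obs": jnp.zeros((1, 128, 8, 3, 5))}  # (devices, rollout, envs, agents, obs)
payload_b = {"obs": jnp.zeros((1, 128, 8, 3, 5))}
merged = jax.tree_map(lambda *xs: jnp.concatenate(xs, axis=2), payload_a, payload_b)
assert merged["obs"].shape == (1, 128, 16, 3, 5)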
+ elapsed_time = time.time() - training_start_time + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics = jax.tree_map(lambda *x : np.asarray(x), *episode_metrics) + episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + + # Separately log timesteps, actoring metrics and training metrics. + speed_info = {"total_time" : elapsed_time, "rollout_time" : np.sum(rollout_times), "learner_time" : np.sum(learner_speeds), "timestep" : t} + logger.log(speed_info , t, eval_step, LogEvent.MISC) + if ep_completed: # only log episode metrics if an episode was completed in the rollout. + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + train_metrics = jax.tree_map(lambda *x : np.asarray(x), *train_metrics) + logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) + + # Evaluation on the learner + evaluation_start_timer = time.time() + key_e, eval_key = jax.random.split(key_e, 2) + episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) + + # Log the results of the evaluation. + elapsed_time = time.time() - evaluation_start_timer + episode_return = jnp.mean(episode_metrics["episode_return"]) + + steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(learner_output.learner_state.params) + max_episode_return = episode_return + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) + + # Measure absolute metric. + if config.arch.absolute_metric: + start_time = time.time() + + key_e, eval_key = jax.random.split(key_e, 2) + episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params.actor_params, 1), eval_key) + + elapsed_time = time.time() - start_time + steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + + + +@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + + # Run experiment. 
+ eval_performance = run_experiment(cfg) + print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() + +#learner_output.episode_metrics.keys() +#dict_keys(['episode_length', 'episode_return']) \ No newline at end of file diff --git a/mava/types.py b/mava/types.py index aa79bf5b4..c6a2cf6aa 100644 --- a/mava/types.py +++ b/mava/types.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Callable, Dict, Generic, Tuple, TypeVar +from typing import Any, Callable, Dict, Generic, Tuple, TypeVar, Optional import chex from flax.core.frozen_dict import FrozenDict @@ -37,7 +37,7 @@ class Observation(NamedTuple): agents_view: chex.Array # (num_agents, num_obs_features) action_mask: chex.Array # (num_agents, num_actions) - step_count: chex.Array # (num_agents, ) + step_count: Optional[chex.Array] = None # (num_agents, ) class ObservationGlobalState(NamedTuple): @@ -49,7 +49,7 @@ class ObservationGlobalState(NamedTuple): agents_view: chex.Array # (num_agents, num_obs_features) action_mask: chex.Array # (num_agents, num_actions) global_state: chex.Array # (num_agents, num_agents * num_obs_features) - step_count: chex.Array # (num_agents, ) + step_count: Optional[chex.Array] = None # (num_agents, ) RNNObservation: TypeAlias = Tuple[Observation, Done] diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index c23e40820..a9313bf64 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -46,10 +46,9 @@ ConnectorWrapper, GigastepWrapper, GymRecordEpisodeMetrics, - GymRwareWrapper, + GymGenericWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory, - GymLBFWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -73,7 +72,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": GymRwareWrapper, "LevelBasedForaging" : GymLBFWrapper} +_gym_registry = {"RobotWarehouse": GymGenericWrapper, "LevelBasedForaging" : GymGenericWrapper} def add_extra_wrappers( @@ -238,7 +237,6 @@ def create_gym_env( wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env - num_env = config.arch.num_envs envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names [ lambda: create_gym_env(config, add_global_state) diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 64a5affec..703d85279 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, GymLBFWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory +from mava.wrappers.gym import GymRecordEpisodeMetrics, GymGenericWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 31146e29a..b329241d9 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -27,7 +27,7 @@ warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") -class GymRwareWrapper(gym.Wrapper): +class GymGenericWrapper(gym.Wrapper): """Wrapper for rware gym environments""" def __init__( @@ -35,7 +35,6 @@ def __init__( env: gym.Env, 
use_individual_rewards: bool = False, add_global_state: bool = False, - eval_env: bool = False, ): """Initialize the gym wrapper @@ -44,17 +43,15 @@ def __init__( use_individual_rewards (bool, optional): Use individual or group rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. - eval_env (bool, optional): Weather the instance is used for training or evaluation. - Defaults to False. """ super().__init__(env) - self._env = env #not having _env leaded tp self.env getting replaced --> circular called + self._env = env self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state # todo : add the global observations + self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[ 0 - ].n # todo: all the agents must have the same num_actions, add assertion? + ].n def reset( self, seed: Optional[int] = None, options: Optional[dict] = None @@ -66,19 +63,24 @@ def reset( agents_view, info = self._env.reset() info = {"actions_mask": self.get_actions_mask(info)} - + if self.add_global_state: + info["global_obs"] = self.get_global_obs(agents_view) + return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: #Vect auto rest + def step(self, actions: NDArray) -> Tuple: agents_view, reward, terminated, truncated, info = self._env.step(actions) info = {"actions_mask": self.get_actions_mask(info)} + if self.add_global_state: + info["global_obs"] = self.get_global_obs(agents_view) if self.use_individual_rewards: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) + return agents_view, reward, terminated, truncated, info def get_actions_mask(self, info: Dict) -> NDArray: @@ -86,66 +88,14 @@ def get_actions_mask(self, info: Dict) -> NDArray: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + def get_global_obs(self, obs: NDArray): + global_obs = np.concatenate(obs, axis=0) + return np.tile(global_obs, (self.num_agents, 1)) + -class GymLBFWrapper(gym.Wrapper): - """Wrapper for rware gym environments""" - - def __init__( - self, - env: gym.Env, - use_individual_rewards: bool = False, - add_global_state: bool = False, - ): - """Initialize the gym wrapper - - Args: - env (gym.env): gym env instance. - use_individual_rewards (bool, optional): Use individual or group rewards. - Defaults to False. - add_global_state (bool, optional) : Create global observations. Defaults to False. - """ - super().__init__(env) - self._env = env #not having _env leaded tp self.env getting replaced --> circular called - self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state # todo : add the global observations - self.num_agents = len(self._env.action_space) - self.num_actions = self._env.action_space[ - 0 - ].n # todo: all the agents must have the same num_actions, add assertion? 
- - def reset( - self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple: - - if seed is not None: - self.env.seed(seed) - - agents_view, info = self._env.reset() - - info = {"actions_mask": self.get_actions_mask(info)} - - return np.array(agents_view), info - - def step(self, actions: NDArray) -> Tuple: #Vect auto rest - - agents_view, reward, terminated, truncated, info = self._env.step(actions) - - info = {"actions_mask": self.get_actions_mask(info)} - - if self.use_individual_rewards: - reward = np.array(reward) - else: - reward = np.array([np.array(reward).mean()] * self.num_agents) - - truncated = [truncated] * self.num_agents - return agents_view, reward, terminated, truncated, info - def get_actions_mask(self, info: Dict) -> NDArray: - if "action_mask" in info: - return np.array(info["action_mask"]) - return np.ones((self.num_agents, self.num_actions), dtype=np.float32) class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" From 7044fbef5b423b8a65c554ef746669a8d921c144 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 8 Jul 2024 14:54:51 +0100 Subject: [PATCH 027/139] fix: removed the sebulba spesifique types --- mava/systems/sebulba/ppo/types.py | 101 ------------------------------ 1 file changed, 101 deletions(-) delete mode 100644 mava/systems/sebulba/ppo/types.py diff --git a/mava/systems/sebulba/ppo/types.py b/mava/systems/sebulba/ppo/types.py deleted file mode 100644 index c27dcace5..000000000 --- a/mava/systems/sebulba/ppo/types.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2022 InstaDeep Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import Dict - -import chex -from flax.core.frozen_dict import FrozenDict -from jumanji.types import TimeStep -from optax._src.base import OptState -from typing_extensions import NamedTuple - -from mava.types import Action, Done, HiddenState, State, Value - - -class Params(NamedTuple): - """Parameters of an actor critic network.""" - - actor_params: FrozenDict - critic_params: FrozenDict - - -class OptStates(NamedTuple): - """OptStates of actor critic learner.""" - - actor_opt_state: OptState - critic_opt_state: OptState - - -class HiddenStates(NamedTuple): - """Hidden states for an actor critic learner.""" - - policy_hidden_state: HiddenState - critic_hidden_state: HiddenState - - -class LearnerState(NamedTuple): - """State of the learner.""" - - params: Params - opt_states: OptStates - key: chex.PRNGKey - env_state: State - timestep: TimeStep - - -class RNNLearnerState(NamedTuple): - """State of the `Learner` for recurrent architectures.""" - - params: Params - opt_states: OptStates - key: chex.PRNGKey - env_state: State - timestep: TimeStep - dones: Done - hstates: HiddenStates - - -class PPOTransition(NamedTuple): - """Transition tuple for PPO.""" - - done: Done - action: Action - value: Value - reward: chex.Array - log_prob: chex.Array - obs: chex.Array - info: Dict - - -class RNNPPOTransition(NamedTuple): - """Transition tuple for PPO.""" - - done: Done - action: Action - value: Value - reward: chex.Array - log_prob: chex.Array - obs: chex.Array - hstates: HiddenStates - info: Dict - - -class Observation(NamedTuple): - """The observation that the agent sees. - agents_view: the agent's view of the environment. - action_mask: boolean array specifying, for each agent, which action is legal. - """ - - agents_view: chex.Array # (num_agents, num_obs_features) - action_mask: chex.Array # (num_agents, num_actions) From 9433f2eb0180d97ab0f87fef7ac87327bf5f40cf Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 09:09:05 +0100 Subject: [PATCH 028/139] feat: ff_mappo and rec_ippo in sebulba --- mava/configs/arch/sebulba.yaml | 6 +- mava/configs/default_ff_mappo_seb.yaml | 7 + mava/configs/default_rec_ippo_seb.yaml | 7 + mava/configs/system/ppo/ff_ippo.yaml | 6 +- mava/evaluator.py | 88 ++- mava/systems/sebulba/ppo/ff_ippo.py | 11 +- mava/systems/sebulba/ppo/ff_mappo.py | 4 +- mava/systems/sebulba/ppo/rec_ippo.py | 850 +++++++++++++++++++++++++ 8 files changed, 960 insertions(+), 19 deletions(-) create mode 100644 mava/configs/default_ff_mappo_seb.yaml create mode 100644 mava/configs/default_rec_ippo_seb.yaml create mode 100644 mava/systems/sebulba/ppo/rec_ippo.py diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 617e54134..fd555f71e 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,18 +1,18 @@ # --- Sebulba config --- arch_name: "sebulba" -num_envs: 3 # number of envs per thread +num_envs: 32 # number of envs per thread # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. -num_evaluation: 10 # Number of evenly spaced evaluations to perform during training. +num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. absolute_metric: True # Whether the absolute metric should be computed. 
For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 # --- Sebulba devices config --- -n_threads_per_executor: 2 # num of different threads/env batches per actor +n_threads_per_executor: 1 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices diff --git a/mava/configs/default_ff_mappo_seb.yaml b/mava/configs/default_ff_mappo_seb.yaml new file mode 100644 index 000000000..8d96d3e97 --- /dev/null +++ b/mava/configs/default_ff_mappo_seb.yaml @@ -0,0 +1,7 @@ +defaults: + - logger: ff_mappo + - arch: sebulba + - system: ppo/ff_mappo + - network: mlp + - env: gym + - _self_ diff --git a/mava/configs/default_rec_ippo_seb.yaml b/mava/configs/default_rec_ippo_seb.yaml new file mode 100644 index 000000000..61eaa95f1 --- /dev/null +++ b/mava/configs/default_rec_ippo_seb.yaml @@ -0,0 +1,7 @@ +defaults: + - logger: rec_ippo + - arch: sebulba + - system: ppo/rec_ippo + - network: rnn + - env: gym + - _self_ diff --git a/mava/configs/system/ppo/ff_ippo.yaml b/mava/configs/system/ppo/ff_ippo.yaml index 0c93c2683..c80b43ec8 100644 --- a/mava/configs/system/ppo/ff_ippo.yaml +++ b/mava/configs/system/ppo/ff_ippo.yaml @@ -2,15 +2,15 @@ total_timesteps: ~ # Set the total environment steps. # If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value. -num_updates: 12 # Number of updates +num_updates: 1000 # Number of updates seed: 42 # --- Agent observations --- add_agent_id: True # --- RL hyperparameters --- -actor_lr: 1.0e-3 # Learning rate for actor network -critic_lr: 1.0e-3 # Learning rate for critic network +actor_lr: 0.0005 # Learning rate for actor network +critic_lr: 0.0005 # Learning rate for critic network update_batch_size: 2 # Number of vectorised gradient updates per device. rollout_length: 128 # Number of environment steps per vectorised environment. ppo_epochs: 4 # Number of ppo epochs per training data batch. diff --git a/mava/evaluator.py b/mava/evaluator.py index f44a8d55b..ca0c8c9a7 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -145,7 +145,7 @@ def evaluator_fn(trained_params: FrozenDict, key: chex.PRNGKey) -> ExperimentOut return evaluator_fn -def get_rnn_evaluator_fn( +def get_anakin_rnn_evaluator_fn( env: Environment, apply_fn: RecActorApply, config: DictConfig, @@ -314,14 +314,14 @@ def make_anakin_eval_fns( # Vmap it over number of agents and create evaluator_fn. if use_recurrent_net: assert scanned_rnn is not None - evaluator = get_rnn_evaluator_fn( + evaluator = get_anakin_rnn_evaluator_fn( eval_env, network_apply_fn, # type: ignore config, scanned_rnn, log_win_rate, ) - absolute_metric_evaluator = get_rnn_evaluator_fn( + absolute_metric_evaluator = get_anakin_rnn_evaluator_fn( eval_env, network_apply_fn, # type: ignore config, @@ -374,9 +374,10 @@ def get_action( #todo explicetly put these on the learner? they should already b return action def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: - dones = np.zeros(env.num_envs) # todo: jnp or np? + obs, info = env.reset() + dones = np.zeros(env.num_envs) # todo: jnp or np? 
eval_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) while not dones.all(): @@ -405,6 +406,81 @@ def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: return eval_episodes +def get_sebulba_rnn_evaluator_fn( + env: Environment, + apply_fn: RecActorApply, + config: DictConfig, + scanned_rnn: nn.Module, + log_win_rate: bool = False, +) -> EvalFn: + """Get the evaluator function for feedforward networks. + + Args: + env (Environment): An evironment instance for evaluation. + apply_fn (callable): Network forward pass method. + config (dict): Experiment configuration. + """ + @jax.jit + def get_action( #todo explicetly put these on the learner? they should already be there + params: FrozenDict, + observation: Observation, + hstate : chex.Array, + key: chex.PRNGKey, + ) -> Tuple: + """Get action.""" + + hstate, pi = apply_fn(params, hstate, observation) + + if config.arch.evaluation_greedy: + action = pi.mode() + else: + action = pi.sample(seed=key) + + return action, hstate + def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: + + + + obs, info = env.reset() + eval_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) + + hstate = scanned_rnn.initialize_carry( + (env.num_envs, config.system.num_agents), config.network.hidden_state_dim + ) + + dones = jnp.zeros((env.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) + + while not dones.all(): + + key, policy_key = jax.random.split(key) + + obs = jax.device_put(jnp.stack(obs, axis = 1)) + action_mask = jax.device_put(np.stack(info["actions_mask"]) ) + + obs, action_mask, dones = jax.tree_map(lambda x : x[jnp.newaxis, :], (obs, action_mask, dones)) + + + actions, hstate = get_action(params, (Observation(obs, action_mask), dones), hstate, policy_key) + cpu_action = jax.device_get(actions) + + obs, reward, terminated, truncated, info = env.step(cpu_action[0].swapaxes(0,1)) + + next_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) + + next_dones = np.logical_or(terminated, truncated) + + per_env_done = np.all(np.logical_and(next_dones, dones[0] == False),axis = 1) + + update_metric = lambda old_metric, new_metric : np.where(per_env_done, new_metric, old_metric) + eval_metrics = jax.tree_map(update_metric, eval_metrics, next_metrics) + + dones = np.logical_or(dones, next_dones) + eval_metrics.pop("is_terminal_step") + + return eval_metrics + + return eval_episodes + def make_sebulba_eval_fns( eval_env_fn: callable, @@ -438,14 +514,14 @@ def make_sebulba_eval_fns( # Vmap it over number of agents and create evaluator_fn. 
if use_recurrent_net: assert scanned_rnn is not None - evaluator = get_rnn_evaluator_fn( + evaluator = get_sebulba_rnn_evaluator_fn( eval_env, network_apply_fn, # type: ignore config, scanned_rnn, log_win_rate, ) - absolute_metric_evaluator = get_rnn_evaluator_fn( + absolute_metric_evaluator = get_sebulba_rnn_evaluator_fn( absolute_eval_env, network_apply_fn, # type: ignore config, diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 30e5bacbf..153f9e4a9 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -74,7 +74,7 @@ def get_action_and_value( """Get action and value.""" key, subkey = jax.random.split(key) - actor_policy = actor_apply_fn(params.actor_params, observation) + actor_policy = actor_apply_fn(params.actor_params, observation) # TODO: check vmapiing action = actor_policy.sample(seed=subkey) log_prob = actor_policy.log_prob(action) @@ -114,6 +114,7 @@ def get_action_and_value( cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) + full_observation = Observation(cached_next_obs, cashed_action_mask) # Get action and value inference_time_start = time.time() ( @@ -121,7 +122,7 @@ def get_action_and_value( log_prob, value, key, - ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), key) + ) = get_action_and_value(params, full_observation, key) # Step the environment @@ -144,7 +145,7 @@ def get_action_and_value( value=value, reward=next_reward, log_prob=log_prob, - obs=Observation(cached_next_obs, cashed_action_mask), + obs=full_observation, info=metrics, ) ) @@ -206,7 +207,7 @@ def get_learner_fn( actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: + def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: Observation, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: """A single update of the network. This function steps the environment and records the trajectory batch for @@ -421,7 +422,7 @@ def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs - timesteps (TimeStep): The initial timestep in the initial trajectory. """ - + # todo: add update_batch_size learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones) return ExperimentOutput( diff --git a/mava/systems/sebulba/ppo/ff_mappo.py b/mava/systems/sebulba/ppo/ff_mappo.py index 5f84fd0d0..66d4174bf 100644 --- a/mava/systems/sebulba/ppo/ff_mappo.py +++ b/mava/systems/sebulba/ppo/ff_mappo.py @@ -210,7 +210,7 @@ def get_learner_fn( actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: + def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: ObservationGlobalState, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: """A single update of the network. 
This function steps the environment and records the trajectory batch for @@ -749,7 +749,7 @@ def run_experiment(_config: DictConfig) -> float: -@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_mappo_seb.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. diff --git a/mava/systems/sebulba/ppo/rec_ippo.py b/mava/systems/sebulba/ppo/rec_ippo.py new file mode 100644 index 000000000..6e204fb21 --- /dev/null +++ b/mava/systems/sebulba/ppo/rec_ippo.py @@ -0,0 +1,850 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import time +from typing import Any, Dict, Tuple, List +import threading +import chex +import flax +import hydra +import jax +import jax.debug +import jax.numpy as jnp +import numpy as np +import optax +import queue +from collections import deque +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict +from omegaconf import DictConfig, OmegaConf +from optax._src.base import OptState +from rich.pretty import pprint + +from mava.evaluator import make_sebulba_eval_fns as make_eval_fns +from mava.networks import RecurrentActor as Actor +from mava.networks import RecurrentValueNet as Critic +from mava.networks import ScannedRNN +from mava.systems.anakin.ppo.types import ( + HiddenStates, + OptStates, + Params, + RNNLearnerState, + RNNPPOTransition, +) +from mava.types import ExperimentOutput, LearnerFn, RecActorApply, RecCriticApply, RNNObservation, Observation +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + merge_leading_dims, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.total_timestep_checker import sebulba_check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +def rollout( + key: chex.PRNGKey, + config: DictConfig, + rollout_queue: queue.Queue, + params_queue: queue.Queue, + apply_fns: Tuple, + learner_devices: List, + actor_device_id : int, + init_hstates : HiddenStates): + + #setup + + env = environments.make_gym_env(config, config.arch.num_envs) + current_actor_device = jax.devices()[actor_device_id] + actor_apply_fn, critic_apply_fn = apply_fns + + # Define the util functions: select action function and prepare data to share it with learner. 
+ @jax.jit + def get_action_and_value( + params: FrozenDict, + observation: RNNObservation, + last_hstates : HiddenStates, + key: chex.PRNGKey, + ) -> Tuple: + """Get action and value.""" + key, subkey = jax.random.split(key) + + policy_hidden_state, actor_policy = actor_apply_fn(params.actor_params, last_hstates.policy_hidden_state, observation) + action = actor_policy.sample(seed=subkey) + log_prob = actor_policy.log_prob(action) + + critic_hidden_state, value = critic_apply_fn(params.critic_params, last_hstates.critic_hidden_state, observation) + hastates = HiddenStates(policy_hidden_state, critic_hidden_state) + return action, log_prob, value, key, hastates + + # Define queues to track time + params_queue_get_time: deque = deque(maxlen=1) + rollout_time: deque = deque(maxlen=1) + rollout_queue_put_time: deque = deque(maxlen=1) + + next_obs , info = env.reset() + next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) + next_hstates = init_hstates + move_to_device = lambda x : jax.device_put(x, device = current_actor_device) + + # Loop till the learner has finished training + for update in range(config.system.num_updates): + inference_time: float = 0 + storage_time: float = 0 + env_send_time: float = 0 + + # Get the latest parameters from the learner + params_queue_get_time_start = time.time() + params = params_queue.get() + params_queue_get_time.append(time.time() - params_queue_get_time_start) + + # Rollout + rollout_time_start = time.time() + storage: List = [] + + # Loop over the rollout length + for _ in range(0, config.system.rollout_length): + + # Cached for transition + cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) # (num_envs, num_agents, ...) + cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) + cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) + + # Add the sequence_len dim + cached_next_obs, cached_next_dones, cashed_action_mask = jax.tree_map(lambda x: x[jnp.newaxis, : ], (cached_next_obs, cached_next_dones, cashed_action_mask)) + + full_observation = Observation(cached_next_obs, cashed_action_mask) + full_observation_dones = (full_observation, cached_next_dones) + cashed_next_hstate = move_to_device(next_hstates) + # Get action and value + inference_time_start = time.time() + ( + action, + log_prob, + value, + key, + next_hstates + ) = get_action_and_value(params, full_observation_dones, cashed_next_hstate, key) + + + # Step the environment + inference_time += time.time() - inference_time_start + env_send_time_start = time.time() + cpu_action = jax.device_get(action) + next_obs, next_reward, terminated, truncated, info = env.step(cpu_action[0].swapaxes(0,1)) # (num_env, num_agents) --> (num_agents, num_env) + env_send_time += time.time() - env_send_time_start + + # Prepare the data + storage_time_start = time.time() + next_dones = np.logical_or(terminated, truncated) + metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics + + # Append data to storage + storage.append( + RNNPPOTransition( + done=cached_next_dones[0], + action=action[0], + value=value[0], + reward=next_reward, + log_prob=log_prob[0], + obs=Observation(cached_next_obs[0], cashed_action_mask[0]), + hstates=cashed_next_hstate, + info=metrics, + ) + ) + storage_time += time.time() - storage_time_start + rollout_time.append(time.time() - rollout_time_start) + + parse_timer = time.time() + + # Prepare data to share with learner + 
#[PPOTransition() * rollout_len] --> PPOTransition[done = (rollout_len, num_envs, num_agents), action = (rollout_len, num_envs, num_agents, num_actions), ...] + stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) + + # Split the arrays over the different learner_devices on the num_envs axis + shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) + + sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) + + # (num_learner_devices, num_envs, num_agents, ...) + sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) + sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) + sharded_next_done = shard_split_payload(next_dones, 0) + sharded_next_hstate = jax.tree_map( lambda x: shard_split_payload(x,0), next_hstates) + + # Pack the obs and action mask + payload_obs_dones = (Observation(sharded_next_obs, sharded_next_action_mask), cached_next_dones) + + # For debugging + speed_info = { + "rollout_time": np.mean(rollout_time), + "params_queue_get_time": np.mean(params_queue_get_time), + "action_inference": inference_time, + "storage_time": storage_time, + "env_step_time": env_send_time, + "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, + "parse_time" : time.time() - parse_timer, + } + #print(speed_info) + + payload = ( + sharded_storage, + payload_obs_dones, + sharded_next_done, + sharded_next_hstate + ) + + # Put data in the rollout queue to share it with the learner + rollout_queue_put_time_start = time.time() + rollout_queue.put(payload) + rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) + + +def get_learner_fn( + apply_fns: Tuple[ RecActorApply, RecCriticApply], + update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], + config: DictConfig, +) -> LearnerFn[RNNLearnerState]: + """Get the learner function.""" + + # Get apply and update functions for actor and critic networks. + actor_apply_fn, critic_apply_fn = apply_fns + actor_update_fn, critic_update_fn = update_fns + + def _update_step(learner_state: RNNLearnerState, traj_batch : RNNPPOTransition, last_obs: RNNObservation, last_dones : chex.Array, last_hstate : HiddenStates) -> Tuple[RNNLearnerState, Tuple]: + """A single update of the network. + + This function steps the environment and records the trajectory batch for + training. It then calculates advantages and targets based on the recorded + trajectory and updates the actor and critic networks based on the calculated + losses. + + Args: + learner_state (NamedTuple): + - params (Params): The current model parameters. + - opt_states (OptStates): The current optimizer states. + - key (PRNGKey): The random number generator state. + - env_state (State): The environment state. + - last_timestep (TimeStep): The last timestep in the current trajectory. + _ (Any): The current metrics info. 
+ """ + + def _calculate_gae( #todo: lake sure this is appropriate + traj_batch: RNNPPOTransition, last_val: chex.Array, last_done: chex.Array + ) -> Tuple[chex.Array, chex.Array]: + def _get_advantages( + carry: Tuple[chex.Array, chex.Array, chex.Array], transition: RNNPPOTransition + ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: + gae, next_value, next_done = carry + done, value, reward = transition.done, transition.value, transition.reward + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - next_done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae + return (gae, value, done), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(last_val), last_val, last_done), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + # CALCULATE ADVANTAGE + params, opt_states, key, _, _, _, _ = learner_state + last_obs = jax.tree_map(lambda x: x[jnp.newaxis, : ], last_obs) + last_dones = last_dones[jnp.newaxis, :] + + + _, last_val = critic_apply_fn(params.critic_params, last_hstate.critic_hidden_state, last_obs) + + advantages, targets = _calculate_gae(traj_batch, last_val[0], last_dones[0]) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_states, key = train_state + traj_batch, advantages, targets = batch_info + + def _actor_loss_fn( + actor_params: FrozenDict, + actor_opt_state: OptState, + traj_batch: RNNPPOTransition, + gae: chex.Array, + key: chex.PRNGKey, + ) -> Tuple: + """Calculate the actor loss.""" + # RERUN NETWORK + + obs_and_done = (traj_batch.obs, traj_batch.done) + _, actor_policy = actor_apply_fn( + actor_params, traj_batch.hstates.policy_hidden_state[0], obs_and_done + ) + log_prob = actor_policy.log_prob(traj_batch.action) + + ratio = jnp.exp(log_prob - traj_batch.log_prob) + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + loss_actor = loss_actor.mean() + # The seed will be used in the TanhTransformedDistribution: + entropy = actor_policy.entropy(seed=key).mean() + + total_loss = loss_actor - config.system.ent_coef * entropy + return total_loss, (loss_actor, entropy) + + def _critic_loss_fn( + critic_params: FrozenDict, + critic_opt_state: OptState, + traj_batch: RNNPPOTransition, + targets: chex.Array, + ) -> Tuple: + """Calculate the critic loss.""" + # RERUN NETWORK + obs_and_done = (traj_batch.obs, traj_batch.done) + _, value = critic_apply_fn( + critic_params, traj_batch.hstates.critic_hidden_state[0], obs_and_done + ) + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + -config.system.clip_eps, config.system.clip_eps + ) + value_losses = jnp.square(value - targets) + value_losses_clipped = jnp.square(value_pred_clipped - targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + total_loss = config.system.vf_coef * value_loss + return total_loss, (value_loss) + + # CALCULATE ACTOR LOSS + key, entropy_key = jax.random.split(key) + actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) + actor_loss_info, actor_grads = 
actor_grad_fn( + params.actor_params, + opt_states.actor_opt_state, + traj_batch, + advantages, + entropy_key, + ) + + # CALCULATE CRITIC LOSS + critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) + critic_loss_info, critic_grads = critic_grad_fn( + params.critic_params, opt_states.critic_opt_state, traj_batch, targets + ) + + # Compute the parallel mean (pmean) over the batch. + # This calculation is inspired by the Anakin architecture demo notebook. + # available at https://tinyurl.com/26tdzs5x + # pmean over devices. + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="device" + ) + # pmean over devices. + critic_grads, critic_loss_info = jax.lax.pmean( + (critic_grads, critic_loss_info), axis_name="device" + ) + + # UPDATE ACTOR PARAMS AND OPTIMISER STATE + actor_updates, actor_new_opt_state = actor_update_fn( + actor_grads, opt_states.actor_opt_state + ) + actor_new_params = optax.apply_updates(params.actor_params, actor_updates) + + # UPDATE CRITIC PARAMS AND OPTIMISER STATE + critic_updates, critic_new_opt_state = critic_update_fn( + critic_grads, opt_states.critic_opt_state + ) + critic_new_params = optax.apply_updates(params.critic_params, critic_updates) + + new_params = Params(actor_new_params, critic_new_params) + new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) + + # PACK LOSS INFO + total_loss = actor_loss_info[0] + critic_loss_info[0] + value_loss = critic_loss_info[1] + actor_loss = actor_loss_info[1][0] + entropy = actor_loss_info[1][1] + loss_info = { + "total_loss": total_loss, + "value_loss": value_loss, + "actor_loss": actor_loss, + "entropy": entropy, + } + + return (new_params, new_opt_state, entropy_key), loss_info + + params, opt_states, traj_batch, advantages, targets, key = update_state + key, shuffle_key, entropy_key = jax.random.split(key, 3) + + # SHUFFLE MINIBATCHES + batch = (traj_batch, advantages, targets) + num_recurrent_chunks = ( + config.system.rollout_length // config.system.recurrent_chunk_size + ) + batch = jax.tree_util.tree_map( + lambda x: x.reshape( + config.system.recurrent_chunk_size, + config.arch.num_envs * num_recurrent_chunks, + *x.shape[2:], + ), + batch, + ) + permutation = jax.random.permutation( + shuffle_key, config.arch.num_envs * num_recurrent_chunks + ) + shuffled_batch = jax.tree_util.tree_map( + lambda x: jnp.take(x, permutation, axis=1), batch + ) + reshaped_batch = jax.tree_util.tree_map( + lambda x: jnp.reshape( + x, (x.shape[0], config.system.num_minibatches, -1, *x.shape[2:]) + ), + shuffled_batch, + ) + minibatches = jax.tree_util.tree_map(lambda x: jnp.swapaxes(x, 1, 0), reshaped_batch) + + # UPDATE MINIBATCHES + (params, opt_states, entropy_key), loss_info = jax.lax.scan( + _update_minibatch, (params, opt_states, entropy_key), minibatches + ) + + update_state = ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + ) + return update_state, loss_info + + update_state = (params, opt_states, traj_batch, advantages, targets, key) + # UPDATE EPOCHS + update_state, loss_info = jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_states, traj_batch, advantages, targets, key = update_state + learner_state = RNNLearnerState(params, opt_states, key, None, None, None, None) + metric = traj_batch.info + return learner_state, (metric, loss_info) + + def learner_fn(learner_state: RNNLearnerState, traj_batch : RNNPPOTransition, last_obs: chex.Array, last_dones : chex.Array, last_hstate : chex.Array) -> 
ExperimentOutput[RNNLearnerState]: + """Learner function. + + This function represents the learner, it updates the network parameters + by iteratively applying the `_update_step` function for a fixed number of + updates. The `_update_step` function is vectorized over a batch of inputs. + + Args: + learner_state (NamedTuple): + - params (Params): The initial model parameters. + - opt_states (OptStates): The initial optimizer state. + - key (chex.PRNGKey): The random number generator state. + - env_state (LogEnvState): The environment state. + - timesteps (TimeStep): The initial timestep in the initial trajectory. + """ + + + learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones, last_hstate) + + return ExperimentOutput( + learner_state=learner_state, + episode_metrics=episode_info, + train_metrics=loss_info, + ) + + return learner_fn + + +def learner_setup( + keys: chex.Array, config: DictConfig, learner_devices: List +) -> Tuple[LearnerFn[RNNLearnerState], Actor, RNNLearnerState]: + """Initialise learner_fn, network, optimiser, environment and states.""" + + #create temporory envoirnments. + env = environments.make_gym_env(config, 1) + # Get number of agents and actions. + action_space = env.single_action_space + config.system.num_agents = len(action_space) + config.system.num_actions = action_space[0].n + + # PRNG keys. + key, actor_net_key, critic_net_key = keys + + # Define network and optimisers. + actor_pre_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) + actor_post_torso = hydra.utils.instantiate(config.network.actor_network.post_torso) + actor_action_head = hydra.utils.instantiate( + config.network.action_head, action_dim=config.system.num_actions + ) + critic_pre_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) + critic_post_torso = hydra.utils.instantiate(config.network.critic_network.post_torso) + + actor_network = Actor( + pre_torso=actor_pre_torso, + post_torso=actor_post_torso, + action_head=actor_action_head, + hidden_state_dim=config.network.hidden_state_dim, + ) + critic_network = Critic( + pre_torso=critic_pre_torso, + post_torso=critic_post_torso, + hidden_state_dim=config.network.hidden_state_dim, + ) + + actor_lr = make_learning_rate(config.system.actor_lr, config) + critic_lr = make_learning_rate(config.system.critic_lr, config) + + actor_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(actor_lr, eps=1e-5), + ) + critic_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(critic_lr, eps=1e-5), + ) + + # Initialise observation: Select only obs for a single agent. + init_obs = jnp.array([[env.single_observation_space.sample()]]) + init_action_mask = jnp.ones((config.system.num_agents, config.system.num_actions)) + init_dones = jnp.zeros((1, 1, config.system.num_agents), dtype=jax.numpy.bool_) + init_x = (Observation(init_obs, init_action_mask), init_dones) + + # Initialise hidden states. + init_policy_hstate = ScannedRNN.initialize_carry( + (config.arch.num_envs, config.system.num_agents), config.network.hidden_state_dim + ) + init_critic_hstate = ScannedRNN.initialize_carry( + (config.arch.num_envs, config.system.num_agents), config.network.hidden_state_dim + ) + + # initialise params and optimiser state. 
+    actor_params = actor_network.init(actor_net_key, init_policy_hstate, init_x)
+    actor_opt_state = actor_optim.init(actor_params)
+    critic_params = critic_network.init(critic_net_key, init_critic_hstate, init_x)
+    critic_opt_state = critic_optim.init(critic_params)
+
+    # Get network apply functions and optimiser updates.
+    apply_fns = (actor_network.apply, critic_network.apply)
+    update_fns = (actor_optim.update, critic_optim.update)
+
+    # Get batched iterated update and replicate it to pmap it over learner cores.
+    learn = get_learner_fn(apply_fns, update_fns, config)
+    learn = jax.pmap(learn, axis_name="device", devices = learner_devices)
+
+    # Pack params and initial states.
+    params = Params(actor_params, critic_params)
+    hstates = HiddenStates(init_policy_hstate, init_critic_hstate)
+
+    # Load model from checkpoint if specified.
+    if config.logger.checkpointing.load_model:
+        loaded_checkpoint = Checkpointer(
+            model_name=config.logger.system_name,
+            **config.logger.checkpointing.load_args,  # Other checkpoint args
+        )
+        # Restore the learner state from the checkpoint
+        restored_params, restored_hstates = loaded_checkpoint.restore_params(
+            input_params=params, restore_hstates=True, THiddenState=HiddenStates
+        )
+        # Update the params and hstates
+        params = restored_params
+        hstates = restored_hstates if restored_hstates else hstates
+
+    # Define params to be replicated across devices and batches.
+    key, step_keys = jax.random.split(key)
+    opt_states = OptStates(actor_opt_state, critic_opt_state)
+    replicate_learner = (params, opt_states, hstates, step_keys)
+
+    # Duplicate learner across Learner devices.
+    replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices)
+
+    # Initialise learner state.
+    params, opt_states, hstates, step_keys = replicate_learner
+    init_learner_state = RNNLearnerState(params, opt_states, step_keys, None, None, init_dones, hstates)
+    env.close()
+
+    return learn, apply_fns, init_learner_state
+
+
+def run_experiment(_config: DictConfig) -> float:
+    """Runs experiment."""
+    config = copy.deepcopy(_config)
+
+    devices = jax.devices()
+    learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids]
+
+    # PRNG keys.
+    key, key_e, actor_net_key, critic_net_key = jax.random.split(
+        jax.random.PRNGKey(config.system.seed), num=4
+    )
+
+    # Sanity check of config
+    if config.system.recurrent_chunk_size is None:
+        config.system.recurrent_chunk_size = config.system.rollout_length
+    else:
+        assert (
+            config.system.rollout_length % config.system.recurrent_chunk_size == 0
+        ), "Rollout length must be divisible by recurrent chunk size."
+    assert (
+        config.arch.num_envs % len(config.arch.learner_device_ids) == 0
+    ), "The number of environments must be divisible by the number of learners."
+
+    assert (
+        int(config.arch.num_envs / len(config.arch.learner_device_ids))
+        * config.arch.n_threads_per_executor
+        % config.system.num_minibatches
+        == 0
+    ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches"
+
+
+    # Setup learner.
+    learn, apply_fns , learner_state = learner_setup(
+        (key ,actor_net_key, critic_net_key), config, learner_devices
+    )
+
+    # Setup evaluator.
+    # One key per device for evaluation.
+    evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config,use_recurrent_net = True, scanned_rnn = ScannedRNN) #todo: make this more generic
+
+    # Calculate total timesteps.
+    config = sebulba_check_total_timesteps(config)
+    assert (
+        config.system.num_updates > config.arch.num_evaluation
+    ), "Number of updates per evaluation must be less than total number of updates."
+
+    # Calculate number of updates per evaluation.
+    config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation)
+    config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation step if the num_updates is not a multiple of num_evaluation
+    steps_per_rollout = (
+        len(config.arch.executor_device_ids)
+        * config.arch.n_threads_per_executor
+        * config.system.rollout_length
+        * config.arch.num_envs
+        * config.system.num_updates_per_eval
+    )
+
+    # Logger setup
+    logger = MavaLogger(config)
+    cfg: Dict = OmegaConf.to_container(config, resolve=True)
+    cfg["arch"]["devices"] = jax.devices()
+    pprint(cfg)
+
+    # Set up checkpointer
+    save_checkpoint = config.logger.checkpointing.save_model
+    if save_checkpoint:
+        checkpointer = Checkpointer(
+            metadata=config,  # Save all config as metadata in the checkpoint
+            model_name=config.logger.system_name,
+            **config.logger.checkpointing.save_args,  # Checkpoint args
+        )
+
+    # Executor setup and launch.
+    unreplicated_params = flax.jax_utils.unreplicate(learner_state.params)
+    unreplicated_hstates = flax.jax_utils.unreplicate(learner_state.hstates)
+    params_queues: List = []
+    rollout_queues: List = []
+    for d_idx, d_id in enumerate(  # Loop through each executor device
+        config.arch.executor_device_ids
+    ):
+        # Replicate params per executor device
+        device_params = jax.device_put(unreplicated_params, devices[d_id])
+        device_hstates = jax.device_put(unreplicated_hstates, devices[d_id])
+        # Loop through each executor thread
+        for thread_id in range(config.arch.n_threads_per_executor):
+            params_queues.append(queue.Queue(maxsize=1))
+            rollout_queues.append(queue.Queue(maxsize=1))
+            params_queues[-1].put(device_params)
+            threading.Thread(
+                target=rollout,
+                args=(
+                    jax.device_put(key, devices[d_id]),
+                    config,
+                    rollout_queues[-1],
+                    params_queues[-1],
+                    apply_fns,
+                    learner_devices,
+                    d_id,
+                    device_hstates,
+                ),
+            ).start() #todo : Use a process instead of a thread? Threads are limited by Python's GIL and they only run on a single core, processes have a bigger overhead (max num_env for optimal performance?)
+
+    # Run experiment for the total number of updates.
+    max_episode_return = jnp.float32(0.0)
+    best_params = None
+    for eval_step in range(config.arch.num_evaluation):
+        training_start_time = time.time()
+        learner_speeds = []
+        rollout_times = []
+
+        episode_metrics = []
+        train_metrics = []
+
+        # Make sure the remaining updates are run on the final evaluation step.
+        num_updates_in_eval = config.system.num_updates_per_eval if eval_step != config.arch.num_evaluation - 1 else remaining_updates
+        for update in range(num_updates_in_eval):
+            sharded_storages = []
+            sharded_next_obss = []
+            sharded_next_dones = []
+            sharded_next_hstates = []
+
+            rollout_start_time = time.time()
+            # Loop through each executor device
+            for d_idx, _ in enumerate(config.arch.executor_device_ids):
+                # Loop through each executor thread
+                for thread_id in range(config.arch.n_threads_per_executor):
+                    # Get data from rollout queue
+                    (
+                        sharded_storage,
+                        sharded_next_obs,
+                        sharded_next_done,
+                        sharded_next_hstate,
+                    ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get()
+                    sharded_storages.append(sharded_storage)
+                    sharded_next_obss.append(sharded_next_obs)
+                    sharded_next_dones.append(sharded_next_done)
+                    sharded_next_hstates.append(sharded_next_hstate)
+
+            rollout_times.append(time.time() - rollout_start_time)
+
+
+            # Concatenate the returned trajectories on the n_env axis
+            sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages)
+            sharded_next_obss = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_obss)
+            sharded_next_hstates = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_hstates)
+
+            sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1)
+
+            learner_start_time = time.time()
+            learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones, sharded_next_hstates)
+            learner_speeds.append(time.time() - learner_start_time)
+
+            # Stack the metrics
+            episode_metrics.append(learner_output.episode_metrics)
+            train_metrics.append(learner_output.train_metrics)
+
+            # Send updated params to executors
+            unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params)
+            for d_idx, d_id in enumerate(config.arch.executor_device_ids):
+                device_params = jax.device_put(unreplicated_params, devices[d_id])
+                for thread_id in range(config.arch.n_threads_per_executor):
+                    params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put(
+                        device_params
+                    )
+
+
+
+        # Log the results of the training.
+        elapsed_time = time.time() - training_start_time
+        t = int(steps_per_rollout * (eval_step + 1))
+        episode_metrics = jax.tree_map(lambda *x : np.asarray(x), *episode_metrics)
+        episode_metrics, ep_completed = get_final_step_metrics(episode_metrics)
+        episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time
+
+        # Separately log timesteps, actor metrics and training metrics.
+        speed_info = {"total_time" : elapsed_time, "rollout_time" : np.sum(rollout_times), "learner_time" : np.sum(learner_speeds), "timestep" : t}
+        logger.log(speed_info , t, eval_step, LogEvent.MISC)
+        if ep_completed: # only log episode metrics if an episode was completed in the rollout.
+ logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + train_metrics = jax.tree_map(lambda *x : np.asarray(x), *train_metrics) + logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) + + # Evaluation on the learner + evaluation_start_timer = time.time() + key_e, eval_key = jax.random.split(key_e, 2) + episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) + + # Log the results of the evaluation. + elapsed_time = time.time() - evaluation_start_timer + episode_return = jnp.mean(episode_metrics["episode_return"]) + + steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(learner_output.learner_state.params) + max_episode_return = episode_return + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) + + # Measure absolute metric. + if config.arch.absolute_metric: + start_time = time.time() + + key_e, eval_key = jax.random.split(key_e, 2) + episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params.actor_params, 1), eval_key) + + elapsed_time = time.time() - start_time + steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time + logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + + + +@hydra.main(config_path="../../../configs", config_name="default_rec_ippo_seb.yaml", version_base="1.2") +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + + # Run experiment. 
+ eval_performance = run_experiment(cfg) + print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() + +#learner_output.episode_metrics.keys() +#dict_keys(['episode_length', 'episode_return']) \ No newline at end of file From 627215d2943899fc6d8ed58cbbece640a21b1d39 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 09:16:21 +0100 Subject: [PATCH 029/139] fix: removed the lbf import/wrapper --- mava/utils/make_env.py | 4 ++-- mava/wrappers/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index a9313bf64..df769d8c7 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -46,7 +46,7 @@ ConnectorWrapper, GigastepWrapper, GymRecordEpisodeMetrics, - GymGenericWrapper, + GymRwareWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory, LbfWrapper, @@ -72,7 +72,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": GymGenericWrapper, "LevelBasedForaging" : GymGenericWrapper} +_gym_registry = {"RobotWarehouse": GymRwareWrapper} def add_extra_wrappers( diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 703d85279..4a4eb6ed0 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymGenericWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory +from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, From c3b405dda78b59c6a5f948d5df1812917aac1edd Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 09:27:46 +0100 Subject: [PATCH 030/139] chore: clean up & updated the code to match the sebulba-ff-ippo branch --- mava/configs/arch/sebulba.yaml | 11 +- mava/configs/env/gym.yaml | 1 + mava/systems/sebulba/ppo/test.py | 50 ------- mava/systems/sebulba/ppo/types.py | 100 -------------- mava/utils/make_env.py | 29 ++-- mava/wrappers/__init__.py | 4 +- mava/wrappers/gym.py | 213 ++++++++++++++++++++++-------- 7 files changed, 177 insertions(+), 231 deletions(-) delete mode 100644 mava/systems/sebulba/ppo/test.py delete mode 100644 mava/systems/sebulba/ppo/types.py diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 98cd4d96d..cbe3f4b52 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,6 +1,6 @@ # --- Sebulba config --- arch_name: "sebulba" -num_envs: 16 # number of envs per thread +num_envs: 32 # number of envs per thread # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select @@ -14,11 +14,4 @@ absolute_metric: True # Whether the absolute metric should be computed. 
For more
 # --- Sebulba devices config ---
 n_threads_per_executor: 1 # num of different threads/env batches per actor
 executor_device_ids: [0] # ids of actor devices
-learner_device_ids: [0] # ids of learner devices
-
-# --- Sebulba rollout and env config ---
-concurrency: False # whether actor and learner should run concurrently
-async_envs: True # "whether to use async vector or sync vector envs"
-
-# --- To be defined during training ---
-log_frequency: ~
+learner_device_ids: [0] # ids of learner devices
\ No newline at end of file
diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml
index ad8d16b9a..1e197a45e 100644
--- a/mava/configs/env/gym.yaml
+++ b/mava/configs/env/gym.yaml
@@ -15,6 +15,7 @@ implicit_agent_id: False
 # environments have a winrate metric.
 log_win_rate: False

+# Whether or not to average the returned rewards over all of the agents.
 use_individual_rewards: True

 kwargs:
diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py
deleted file mode 100644
index b868f69b6..000000000
--- a/mava/systems/sebulba/ppo/test.py
+++ /dev/null
@@ -1,50 +0,0 @@
-
-import copy
-import time
-from typing import Any, Dict, Tuple, List
-import threading
-import chex
-import flax
-import hydra
-import jax
-import jax.numpy as jnp
-import numpy as np
-import optax
-import queue
-from collections import deque
-from colorama import Fore, Style
-from flax.core.frozen_dict import FrozenDict
-from omegaconf import DictConfig, OmegaConf
-from optax._src.base import OptState
-from rich.pretty import pprint
-
-from mava.evaluator import make_eval_fns
-from mava.networks import FeedForwardActor as Actor
-from mava.networks import FeedForwardValueNet as Critic
-from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this
-from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation
-from mava.utils import make_env as environments
-from mava.utils.checkpointing import Checkpointer
-from mava.utils.jax_utils import (
-    merge_leading_dims,
-    unreplicate_batch_dim,
-    unreplicate_n_dims,
-)
-from mava.utils.logger import LogEvent, MavaLogger
-from mava.utils.total_timestep_checker import check_total_timesteps
-from mava.utils.training import make_learning_rate
-from mava.wrappers.episode_metrics import get_final_step_metrics
-
-
-@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2")
-def hydra_entry_point(cfg: DictConfig) -> float:
-    """Experiment entry point."""
-    # Allow dynamic attributes.
-    OmegaConf.set_struct(cfg, False)
-
-    env = environments.make_gym_env(cfg)
-    a = env.reset()
-    print(a)
-
-if __name__ == "__main__":
-    hydra_entry_point()
\ No newline at end of file
diff --git a/mava/systems/sebulba/ppo/types.py b/mava/systems/sebulba/ppo/types.py
deleted file mode 100644
index 6e02aa904..000000000
--- a/mava/systems/sebulba/ppo/types.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2022 InstaDeep Ltd. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Dict - -import chex -from flax.core.frozen_dict import FrozenDict -from jumanji.types import TimeStep -from optax._src.base import OptState -from typing_extensions import NamedTuple - -from mava.types import Action, Done, HiddenState, State, Value - - -class Params(NamedTuple): - """Parameters of an actor critic network.""" - - actor_params: FrozenDict - critic_params: FrozenDict - - -class OptStates(NamedTuple): - """OptStates of actor critic learner.""" - - actor_opt_state: OptState - critic_opt_state: OptState - - -class HiddenStates(NamedTuple): - """Hidden states for an actor critic learner.""" - - policy_hidden_state: HiddenState - critic_hidden_state: HiddenState - - -class LearnerState(NamedTuple): - """State of the learner.""" - - params: Params - opt_states: OptStates - key: chex.PRNGKey - env_state: State - timestep: TimeStep - - -class RNNLearnerState(NamedTuple): - """State of the `Learner` for recurrent architectures.""" - - params: Params - opt_states: OptStates - key: chex.PRNGKey - env_state: State - timestep: TimeStep - dones: Done - hstates: HiddenStates - - -class PPOTransition(NamedTuple): - """Transition tuple for PPO.""" - - done: Done - action: Action - value: Value - reward: chex.Array - log_prob: chex.Array - obs: chex.Array - info: Dict - - -class RNNPPOTransition(NamedTuple): - """Transition tuple for PPO.""" - - done: Done - action: Action - value: Value - reward: chex.Array - log_prob: chex.Array - obs: chex.Array - hstates: HiddenStates - - -class Observation(NamedTuple): - """The observation that the agent sees. - agents_view: the agent's view of the environment. - action_mask: boolean array specifying, for each agent, which action is legal. - """ - - agents_view: chex.Array # (num_agents, num_obs_features) - action_mask: chex.Array # (num_agents, num_actions) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 69fc54623..a54cafff8 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -22,6 +22,7 @@ import jumanji import matrax from gigastep import ScenarioBuilder +import lbforaging from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment from jumanji.environments.routing.cleaner.generator import ( @@ -46,6 +47,8 @@ GigastepWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, + GymAgentIDWrapper, + _multiagent_worker_shared_memory, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -69,7 +72,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"rware": GymRwareWrapper} +_gym_registry = {"RobotWarehouse": GymRwareWrapper} def add_extra_wrappers( @@ -208,38 +211,38 @@ def make_gigastep_env( def make_gym_env( - config: DictConfig, add_global_state: bool = False, eval_env: bool = False + config: DictConfig, num_env : int, add_global_state: bool = False, ) -> Environment: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. Args: - env_name (str): The name of the environment to create. config (Dict): The configuration of the environment. + num_env (int) : The number of parallel envs to create. add_global_state (bool): Whether to add the global state to the observation. Default False. Returns: - A tuple of the environments. + Async environments. 
""" - base_env_name = config.env.scenario.split(":")[0] + base_env_name = config.env.env_name wrapper = _gym_registry[base_env_name] def create_gym_env( - config: DictConfig, add_global_state: bool = False, eval_env: bool = False + config: DictConfig, add_global_state: bool = False ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. env = gym.make(config.env.scenario) - wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state, eval_env) + wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state) if not config.env.implicit_agent_id: - pass # todo : add agent id wrapper for gym . - env = GymRecordEpisodeMetrics(env) + wrapped_env = GymAgentIDWrapper(wrapped_env) # todo : add agent id wrapper for gym . + wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env - num_env = config.arch.num_envs envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names [ - lambda: create_gym_env(config, add_global_state, eval_env=eval_env) + lambda: create_gym_env(config, add_global_state) for _ in range(num_env) - ] + ], + worker=_multiagent_worker_shared_memory ) return envs @@ -267,4 +270,4 @@ def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environmen elif env_name in _gigastep_registry: return make_gigastep_env(env_name, config, add_global_state) else: - raise ValueError(f"{env_name} is not a supported environment.") + raise ValueError(f"{env_name} is not a supported environment.") \ No newline at end of file diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index e888d9317..151a1c509 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,7 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper +from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, @@ -24,4 +24,4 @@ RwareWrapper, ) from mava.wrappers.matrax import MatraxWrapper -from mava.wrappers.observation import AgentIDWrapper +from mava.wrappers.observation import AgentIDWrapper \ No newline at end of file diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 69632f1bc..041916680 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -13,17 +13,21 @@ # limitations under the License. import warnings -from typing import Dict, Tuple +from typing import Dict, Tuple, Optional import gym import numpy as np from numpy.typing import NDArray +from gym import spaces +from gym.vector.utils import write_to_shared_memory +import sys + # Filter out the warnings warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") -class GymRwareWrapper(gym.Wrapper): +class GymRwareWrapper(gym.Wrapper): """Wrapper for rware gym environments""" def __init__( @@ -31,7 +35,6 @@ def __init__( env: gym.Env, use_individual_rewards: bool = False, add_global_state: bool = False, - eval_env: bool = False, ): """Initialize the gym wrapper @@ -40,109 +43,205 @@ def __init__( use_individual_rewards (bool, optional): Use individual or group rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. - eval_env (bool, optional): Weather the instance is used for training or evaluation. 
- Defaults to False. """ super().__init__(env) - self._env = gym.wrappers.compatibility.EnvCompatibility(env) + self._env = env self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state # todo : add the global observations - self.eval_env = eval_env + self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[ 0 - ].n # todo: all the agents must have the same num_actions, add assertion? - - def reset(self) -> Tuple: - (agents_view, info), _ = self._env.reset( - seed=np.random.randint(1) - ) # todo: assure reproducibility, this only works for rware - - info = {"actions_mask": self._get_actions_mask(info)} - + ].n + + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple: + + if seed is not None: + self.env.seed(seed) + + agents_view, info = self._env.reset() + + info = {"actions_mask": self.get_actions_mask(info)} + if self.add_global_state: + info["global_obs"] = self.get_global_obs(agents_view) + return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: + def step(self, actions: NDArray) -> Tuple: - agents_view, reward, terminated, truncated, info = self.env.step(actions) + agents_view, reward, terminated, truncated, info = self._env.step(actions) - done = np.logical_or(terminated, truncated).all() - - if ( - done and not self.eval_env - ): # only auto-reset in training envs, same functionality as the AutoResetWrapper. - agents_view, info = self.reset() - reward = np.zeros(self.num_agents) - terminated, truncated = np.zeros(self.num_agents, dtype=bool), np.zeros( - self.num_agents, dtype=bool - ) - return agents_view, reward, terminated, truncated, info - - info = {"actions_mask": self._get_actions_mask(info)} + info = {"actions_mask": self.get_actions_mask(info)} + if self.add_global_state: + info["global_obs"] = self.get_global_obs(agents_view) if self.use_individual_rewards: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - + return agents_view, reward, terminated, truncated, info - def _get_actions_mask(self, info: Dict) -> NDArray: + def get_actions_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + def get_global_obs(self, obs: NDArray): + global_obs = np.concatenate(obs, axis=0) + return np.tile(global_obs, (self.num_agents, 1)) + class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" def __init__(self, env: gym.Env): super().__init__(env) + self._env = env self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 + self.running_count_episode_length = 0.0 def reset(self) -> Tuple: # Reset the env - agents_view, info = self.env.reset() - - # Reset the metrics - self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 + agents_view, info = self._env.reset() # Create the metrics dict metrics = { "episode_return": self.running_count_episode_return, - "episode_length": self.self.running_count_episode_length, - "is_terminal_step": False, + "episode_length": self.running_count_episode_length, + "is_terminal_step": True, } + + # Reset the metrics + self.running_count_episode_return = 0.0 + self.running_count_episode_length = 0 + if "won_episode" in info: metrics["won_episode"] = info["won_episode"] + + info["metrics"] = metrics - return agents_view, metrics + return agents_view, info 
def step(self, actions: NDArray) -> Tuple: # Step the env - agents_view, reward, terminated, truncated, info = self.env.step(actions) - - # Update the metrics - done = np.logical_or(terminated, truncated).all() + agents_view, reward, terminated, truncated, info = self._env.step(actions) - if not done: - self.running_count_episode_return += float(np.mean(reward)) - self.running_count_episode_length += 1 - - else: - self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 + self.running_count_episode_return += float(np.mean(reward)) + self.running_count_episode_length += 1 metrics = { "episode_return": self.running_count_episode_return, - "episode_length": self.self.running_count_episode_length, - "is_terminal_step": False, + "episode_length": self.running_count_episode_length, + "is_terminal_step": False, # We handle the True case in the reset function since this gets overwritten } if "won_episode" in info: metrics["won_episode"] = info["won_episode"] + + info["metrics"] = metrics + + return agents_view, reward, terminated, truncated, info + +class GymAgentIDWrapper(gym.Wrapper): + """Add onehot agent IDs to observation.""" + + def __init__(self, env: gym.Env): + super().__init__(env) - return agents_view, reward, terminated, truncated, metrics + self.agent_ids = np.eye(self.env.num_agents) + observation_space = self.env.observation_space[0] + _obs_low, _obs_high, _obs_dtype, _obs_shape = ( + observation_space.low[0], + observation_space.high[0], + observation_space.dtype, + observation_space.shape, + ) + _new_obs_shape = (_obs_shape[0] + self.env.num_agents,) + _observation_boxs = [spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype)] * self.env.num_agents + self.observation_space = spaces.Tuple(_observation_boxs) + + def reset(self) -> Tuple[np.ndarray, Dict]: + """Reset the environment.""" + obs, info = self.env.reset() + obs = np.concatenate([self.agent_ids, obs], axis=1) + return obs, info + + def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: + """Step the environment.""" + obs, reward, terminated, truncated, info = self.env.step(action) + obs = np.concatenate([self.agent_ids, obs], axis=1) + return obs, reward, terminated, truncated, info + + +def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): + assert shared_memory is not None + env = env_fn() + observation_space = env.observation_space + parent_pipe.close() + try: + while True: + command, data = pipe.recv() + if command == "reset": + observation, info = env.reset(**data) + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + pipe.send(((None, info), True)) + + elif command == "step": + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + if np.logical_or(terminated, truncated).all(): + old_observation, old_info = observation, info + observation, info = env.reset() + info["final_observation"] = old_observation + info["final_info"] = old_info + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + pipe.send(((None, reward, terminated, truncated, info), True)) + elif command == "seed": + env.seed(data) + pipe.send((None, True)) + elif command == "close": + pipe.send((None, True)) + break + elif command == "_call": + name, args, kwargs = data + if name in ["reset", "step", "seed", "close"]: + raise ValueError( + f"Trying to call function `{name}` with " + f"`_call`. Use `{name}` directly instead." 
+ ) + function = getattr(env, name) + if callable(function): + pipe.send((function(*args, **kwargs), True)) + else: + pipe.send((function, True)) + elif command == "_setattr": + name, value = data + setattr(env, name, value) + pipe.send((None, True)) + elif command == "_check_spaces": + pipe.send( + ((data[0] == observation_space, data[1] == env.action_space), True) + ) + else: + raise RuntimeError( + f"Received unknown command `{command}`. Must " + "be one of {`reset`, `step`, `seed`, `close`, `_call`, " + "`_setattr`, `_check_spaces`}." + ) + except (KeyboardInterrupt, Exception): + error_queue.put((index,) + sys.exc_info()[:2]) + pipe.send((None, False)) + finally: + env.close() \ No newline at end of file From e40c5d4e2fd2ea60104f5b48201856478f8df374 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 10:01:34 +0100 Subject: [PATCH 031/139] chore : pre-commits and some comments --- mava/configs/arch/sebulba.yaml | 2 +- mava/utils/make_env.py | 18 ++++--- mava/wrappers/__init__.py | 9 +++- mava/wrappers/gym.py | 88 +++++++++++++++++----------------- 4 files changed, 61 insertions(+), 56 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index cbe3f4b52..b6a0a9699 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -14,4 +14,4 @@ absolute_metric: True # Whether the absolute metric should be computed. For more # --- Sebulba devices config --- n_threads_per_executor: 1 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices -learner_device_ids: [0] # ids of learner devices \ No newline at end of file +learner_device_ids: [0] # ids of learner devices diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index a54cafff8..5ee4e697c 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -22,7 +22,6 @@ import jumanji import matrax from gigastep import ScenarioBuilder -import lbforaging from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment from jumanji.environments.routing.cleaner.generator import ( @@ -45,16 +44,16 @@ CleanerWrapper, ConnectorWrapper, GigastepWrapper, + GymAgentIDWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, - GymAgentIDWrapper, - _multiagent_worker_shared_memory, LbfWrapper, MabraxWrapper, MatraxWrapper, RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, + _multiagent_worker_shared_memory, ) # Registry mapping environment names to their generator and wrapper classes. @@ -211,7 +210,9 @@ def make_gigastep_env( def make_gym_env( - config: DictConfig, num_env : int, add_global_state: bool = False, + config: DictConfig, + num_env: int, + add_global_state: bool = False, ) -> Environment: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. 
@@ -238,11 +239,8 @@ def create_gym_env( return wrapped_env envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names - [ - lambda: create_gym_env(config, add_global_state) - for _ in range(num_env) - ], - worker=_multiagent_worker_shared_memory + [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], + worker=_multiagent_worker_shared_memory, ) return envs @@ -270,4 +268,4 @@ def make(config: DictConfig, add_global_state: bool = False) -> Tuple[Environmen elif env_name in _gigastep_registry: return make_gigastep_env(env_name, config, add_global_state) else: - raise ValueError(f"{env_name} is not a supported environment.") \ No newline at end of file + raise ValueError(f"{env_name} is not a supported environment.") diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 151a1c509..ee8fdf186 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,12 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory +from mava.wrappers.gym import ( + GymAgentIDWrapper, + GymRecordEpisodeMetrics, + GymRwareWrapper, + _multiagent_worker_shared_memory, +) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, @@ -24,4 +29,4 @@ RwareWrapper, ) from mava.wrappers.matrax import MatraxWrapper -from mava.wrappers.observation import AgentIDWrapper \ No newline at end of file +from mava.wrappers.observation import AgentIDWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 041916680..978ad4033 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -12,23 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import warnings -from typing import Dict, Tuple, Optional +from typing import Any, Callable, Dict, Optional, Tuple import gym import numpy as np -from numpy.typing import NDArray - from gym import spaces from gym.vector.utils import write_to_shared_memory -import sys +from numpy.typing import NDArray # Filter out the warnings warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") -class GymRwareWrapper(gym.Wrapper): - """Wrapper for rware gym environments""" +class GymRwareWrapper(gym.Wrapper): + """Wrapper for rware gym environments.""" def __init__( self, @@ -45,30 +44,26 @@ def __init__( add_global_state (bool, optional) : Create global observations. Defaults to False. 
""" super().__init__(env) - self._env = env + self._env = env self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state + self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) - self.num_actions = self._env.action_space[ - 0 - ].n - - def reset( - self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple: - + self.num_actions = self._env.action_space[0].n + + def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: + if seed is not None: self.env.seed(seed) - - agents_view, info = self._env.reset() + + agents_view, info = self._env.reset() info = {"actions_mask": self.get_actions_mask(info)} if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) - + return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: + def step(self, actions: NDArray) -> Tuple: agents_view, reward, terminated, truncated, info = self._env.step(actions) @@ -80,7 +75,7 @@ def step(self, actions: NDArray) -> Tuple: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - + return agents_view, reward, terminated, truncated, info def get_actions_mask(self, info: Dict) -> NDArray: @@ -88,7 +83,7 @@ def get_actions_mask(self, info: Dict) -> NDArray: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - def get_global_obs(self, obs: NDArray): + def get_global_obs(self, obs: NDArray) -> NDArray: global_obs = np.concatenate(obs, axis=0) return np.tile(global_obs, (self.num_agents, 1)) @@ -113,14 +108,14 @@ def reset(self) -> Tuple: "episode_length": self.running_count_episode_length, "is_terminal_step": True, } - + # Reset the metrics self.running_count_episode_return = 0.0 self.running_count_episode_length = 0 - + if "won_episode" in info: metrics["won_episode"] = info["won_episode"] - + info["metrics"] = metrics return agents_view, info @@ -136,17 +131,18 @@ def step(self, actions: NDArray) -> Tuple: metrics = { "episode_return": self.running_count_episode_return, "episode_length": self.running_count_episode_length, - "is_terminal_step": False, # We handle the True case in the reset function since this gets overwritten + "is_terminal_step": False, } if "won_episode" in info: metrics["won_episode"] = info["won_episode"] - + info["metrics"] = metrics - + return agents_view, reward, terminated, truncated, info - + + class GymAgentIDWrapper(gym.Wrapper): - """Add onehot agent IDs to observation.""" + """Add one hot agent IDs to observation.""" def __init__(self, env: gym.Env): super().__init__(env) @@ -160,7 +156,9 @@ def __init__(self, env: gym.Env): observation_space.shape, ) _new_obs_shape = (_obs_shape[0] + self.env.num_agents,) - _observation_boxs = [spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype)] * self.env.num_agents + _observation_boxs = [ + spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype) + ] * self.env.num_agents self.observation_space = spaces.Tuple(_observation_boxs) def reset(self) -> Tuple[np.ndarray, Dict]: @@ -174,9 +172,18 @@ def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: obs, reward, terminated, truncated, info = self.env.step(action) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, reward, terminated, truncated, info - -def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): + +# Copied form 
https://github.com/openai/gym/blob/master/gym/vector/async_vector_env.py
+# Modified to work with multiple agents
+def _multiagent_worker_shared_memory(  # noqa: CCR001
+    index: int,
+    env_fn: Callable[[], Any],
+    pipe: Any,
+    parent_pipe: Any,
+    shared_memory: Any,
+    error_queue: Any,
+) -> None:
     assert shared_memory is not None
     env = env_fn()
     observation_space = env.observation_space
@@ -186,9 +193,7 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me
             command, data = pipe.recv()
             if command == "reset":
                 observation, info = env.reset(**data)
-                write_to_shared_memory(
-                    observation_space, index, observation, shared_memory
-                )
+                write_to_shared_memory(observation_space, index, observation, shared_memory)
                 pipe.send(((None, info), True))

             elif command == "step":
@@ -199,14 +204,13 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me
                     truncated,
                     info,
                 ) = env.step(data)
+                # Handle the dones across all envs and agents
                 if np.logical_or(terminated, truncated).all():
                     old_observation, old_info = observation, info
                     observation, info = env.reset()
                     info["final_observation"] = old_observation
                     info["final_info"] = old_info
-                write_to_shared_memory(
-                    observation_space, index, observation, shared_memory
-                )
+                write_to_shared_memory(observation_space, index, observation, shared_memory)
                 pipe.send(((None, reward, terminated, truncated, info), True))
             elif command == "seed":
                 env.seed(data)
@@ -231,9 +235,7 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me
                 setattr(env, name, value)
                 pipe.send((None, True))
             elif command == "_check_spaces":
-                pipe.send(
-                    ((data[0] == observation_space, data[1] == env.action_space), True)
-                )
+                pipe.send(((data[0] == observation_space, data[1] == env.action_space), True))
             else:
                 raise RuntimeError(
                     f"Received unknown command `{command}`. 
Must " @@ -244,4 +246,4 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me error_queue.put((index,) + sys.exc_info()[:2]) pipe.send((None, False)) finally: - env.close() \ No newline at end of file + env.close() From 4b17c1539e187ec64b373a6723fb4feb1a226187 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 10:09:30 +0100 Subject: [PATCH 032/139] chore: removed unused config file --- mava/configs/default_ff_ippo_seb.yaml | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 mava/configs/default_ff_ippo_seb.yaml diff --git a/mava/configs/default_ff_ippo_seb.yaml b/mava/configs/default_ff_ippo_seb.yaml deleted file mode 100644 index 1002d90c4..000000000 --- a/mava/configs/default_ff_ippo_seb.yaml +++ /dev/null @@ -1,7 +0,0 @@ -defaults: - - logger: ff_ippo - - arch: sebulba - - system: ppo/ff_ippo - - network: mlp - - env: gym - - _self_ From 9ec6b16db7ced8fe4953961c73ed29322db99760 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 10:58:55 +0100 Subject: [PATCH 033/139] feat: sebulba ff_ippo --- mava/configs/default_ff_mappo_seb.yaml | 7 - mava/configs/default_rec_ippo_seb.yaml | 7 - mava/systems/sebulba/ppo/ff_mappo.py | 768 ---------------------- mava/systems/sebulba/ppo/orig.py | 795 ----------------------- mava/systems/sebulba/ppo/rec_ippo.py | 850 ------------------------- mava/systems/sebulba/ppo/test.py | 86 --- mava/wrappers/gym.py | 91 ++- 7 files changed, 44 insertions(+), 2560 deletions(-) delete mode 100644 mava/configs/default_ff_mappo_seb.yaml delete mode 100644 mava/configs/default_rec_ippo_seb.yaml delete mode 100644 mava/systems/sebulba/ppo/ff_mappo.py delete mode 100644 mava/systems/sebulba/ppo/orig.py delete mode 100644 mava/systems/sebulba/ppo/rec_ippo.py delete mode 100644 mava/systems/sebulba/ppo/test.py diff --git a/mava/configs/default_ff_mappo_seb.yaml b/mava/configs/default_ff_mappo_seb.yaml deleted file mode 100644 index 8d96d3e97..000000000 --- a/mava/configs/default_ff_mappo_seb.yaml +++ /dev/null @@ -1,7 +0,0 @@ -defaults: - - logger: ff_mappo - - arch: sebulba - - system: ppo/ff_mappo - - network: mlp - - env: gym - - _self_ diff --git a/mava/configs/default_rec_ippo_seb.yaml b/mava/configs/default_rec_ippo_seb.yaml deleted file mode 100644 index 61eaa95f1..000000000 --- a/mava/configs/default_rec_ippo_seb.yaml +++ /dev/null @@ -1,7 +0,0 @@ -defaults: - - logger: rec_ippo - - arch: sebulba - - system: ppo/rec_ippo - - network: rnn - - env: gym - - _self_ diff --git a/mava/systems/sebulba/ppo/ff_mappo.py b/mava/systems/sebulba/ppo/ff_mappo.py deleted file mode 100644 index 66d4174bf..000000000 --- a/mava/systems/sebulba/ppo/ff_mappo.py +++ /dev/null @@ -1,768 +0,0 @@ -# Copyright 2022 InstaDeep Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import time -from typing import Any, Dict, Tuple, List -import threading -import chex -import flax -import hydra -import jax -import jax.debug -import jax.numpy as jnp -import numpy as np -import optax -import queue -from collections import deque -from colorama import Fore, Style -from flax.core.frozen_dict import FrozenDict -from omegaconf import DictConfig, OmegaConf -from optax._src.base import OptState -from rich.pretty import pprint - -from mava.evaluator import make_sebulba_eval_fns as make_eval_fns -from mava.networks import FeedForwardActor as Actor -from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this Observation to use the standard obs -from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, ObservationGlobalState -from mava.utils import make_env as environments -from mava.utils.checkpointing import Checkpointer -from mava.utils.jax_utils import ( - merge_leading_dims, - unreplicate_batch_dim, - unreplicate_n_dims, -) -from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import sebulba_check_total_timesteps -from mava.utils.training import make_learning_rate -from mava.wrappers.episode_metrics import get_final_step_metrics - - -def rollout( - key: chex.PRNGKey, - config: DictConfig, - rollout_queue: queue.Queue, - params_queue: queue.Queue, - apply_fns: Tuple, - learner_devices: List, - actor_device_id : int): - - #setup - env = environments.make_gym_env(config, config.arch.num_envs, add_global_state=True) - current_actor_device = jax.devices()[actor_device_id] - actor_apply_fn, critic_apply_fn = apply_fns - - # Define the util functions: select action function and prepare data to share it with learner. - @jax.jit - def get_action_and_value( - params: FrozenDict, - observation: ObservationGlobalState, - key: chex.PRNGKey, - ) -> Tuple: - """Get action and value.""" - key, subkey = jax.random.split(key) - - actor_policy = actor_apply_fn(params.actor_params, observation) - action = actor_policy.sample(seed=subkey) - log_prob = actor_policy.log_prob(action) - - value = critic_apply_fn(params.critic_params, observation).squeeze() - return action, log_prob, value, key - - # Define queues to track time - params_queue_get_time: deque = deque(maxlen=1) - rollout_time: deque = deque(maxlen=1) - rollout_queue_put_time: deque = deque(maxlen=1) - - next_obs , info = env.reset() - next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) - - move_to_device = lambda x : jax.device_put(x, device = current_actor_device) - - # Loop till the learner has finished training - for update in range(config.system.num_updates): - inference_time: float = 0 - storage_time: float = 0 - env_send_time: float = 0 - - # Get the latest parameters from the learner - params_queue_get_time_start = time.time() - params = params_queue.get() - params_queue_get_time.append(time.time() - params_queue_get_time_start) - - # Rollout - rollout_time_start = time.time() - storage: List = [] - - # Loop over the rollout length - for _ in range(0, config.system.rollout_length): - - # Cached for transition - cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) # (num_envs, num_agents, ...) 
- cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) - cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) - cached_next_global_obs = move_to_device(np.stack(info["global_obs"])) - - - # Get action and value - full_observation = ObservationGlobalState(cached_next_obs, cashed_action_mask, cached_next_global_obs) - inference_time_start = time.time() - ( - action, - log_prob, - value, - key, - ) = get_action_and_value(params, full_observation , key) - - - # Step the environment - inference_time += time.time() - inference_time_start - env_send_time_start = time.time() - cpu_action = jax.device_get(action) - next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) # (num_env, num_agents) --> (num_agents, num_env) - env_send_time += time.time() - env_send_time_start - - # Prepare the data - storage_time_start = time.time() - next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics - - # Append data to storage - storage.append( - PPOTransition( - done=cached_next_dones, - action=action, - value=value, - reward=next_reward, - log_prob=log_prob, - obs=full_observation, - info=metrics, - ) - ) - storage_time += time.time() - storage_time_start - rollout_time.append(time.time() - rollout_time_start) - - parse_timer = time.time() - - # Prepare data to share with learner - #[PPOTransition() * rollout_len] --> PPOTransition[done = (rollout_len, num_envs, num_agents), action = (rollout_len, num_envs, num_agents, num_actions), ...] - stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) - - - # Split the arrays over the different learner_devices on the num_envs axis - shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) - - sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) - - # (num_learner_devices, num_envs, num_agents, ...) 
- sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) - sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) - sharded_next_global_obs = shard_split_payload(np.stack(info["global_obs"]), 0) - sharded_next_done = shard_split_payload(next_dones, 0) - - # Pack the obs and action mask - payload_obs = ObservationGlobalState(sharded_next_obs, sharded_next_action_mask, sharded_next_global_obs) - - # For debugging - speed_info = { - "rollout_time": np.mean(rollout_time), - "params_queue_get_time": np.mean(params_queue_get_time), - "action_inference": inference_time, - "storage_time": storage_time, - "env_step_time": env_send_time, - "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, - "parse_time" : time.time() - parse_timer, - } - #print(speed_info) - - payload = ( - sharded_storage, - payload_obs, - sharded_next_done, - ) - - # Put data in the rollout queue to share it with the learner - rollout_queue_put_time_start = time.time() - rollout_queue.put(payload) - rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) - - -def get_learner_fn( - apply_fns: Tuple[ActorApply, CriticApply], - update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], - config: DictConfig, -) -> LearnerFn[LearnerState]: - """Get the learner function.""" - - # Get apply and update functions for actor and critic networks. - actor_apply_fn, critic_apply_fn = apply_fns - actor_update_fn, critic_update_fn = update_fns - - def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: ObservationGlobalState, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: - """A single update of the network. - - This function steps the environment and records the trajectory batch for - training. It then calculates advantages and targets based on the recorded - trajectory and updates the actor and critic networks based on the calculated - losses. - - Args: - learner_state (NamedTuple): - - params (Params): The current model parameters. - - opt_states (OptStates): The current optimizer states. - - key (PRNGKey): The random number generator state. - - env_state (State): The environment state. - - last_timestep (TimeStep): The last timestep in the current trajectory. - _ (Any): The current metrics info. 
- """ - - def _calculate_gae( #todo: lake sure this is appropriate - traj_batch: PPOTransition, last_val: chex.Array, last_done: chex.Array - ) -> Tuple[chex.Array, chex.Array]: - def _get_advantages( - carry: Tuple[chex.Array, chex.Array, chex.Array], transition: PPOTransition - ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: - gae, next_value, next_done = carry - done, value, reward = transition.done, transition.value, transition.reward - gamma = config.system.gamma - delta = reward + gamma * next_value * (1 - next_done) - value - gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae - return (gae, value, done), gae - - _, advantages = jax.lax.scan( - _get_advantages, - (jnp.zeros_like(last_val), last_val, last_done), - traj_batch, - reverse=True, - unroll=16, - ) - return advantages, advantages + traj_batch.value - - # CALCULATE ADVANTAGE - params, opt_states, key, _, _ = learner_state - last_val = critic_apply_fn(params.critic_params, last_obs) - advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) - - def _update_epoch(update_state: Tuple, _: Any) -> Tuple: - """Update the network for a single epoch.""" - - def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: - """Update the network for a single minibatch.""" - - # UNPACK TRAIN STATE AND BATCH INFO - params, opt_states, key = train_state - traj_batch, advantages, targets = batch_info - - def _actor_loss_fn( - actor_params: FrozenDict, - actor_opt_state: OptState, - traj_batch: PPOTransition, - gae: chex.Array, - key: chex.PRNGKey, - ) -> Tuple: - """Calculate the actor loss.""" - # RERUN NETWORK - actor_policy = actor_apply_fn(actor_params, traj_batch.obs) - log_prob = actor_policy.log_prob(traj_batch.action) - - # CALCULATE ACTOR LOSS - ratio = jnp.exp(log_prob - traj_batch.log_prob) - gae = (gae - gae.mean()) / (gae.std() + 1e-8) - loss_actor1 = ratio * gae - loss_actor2 = ( - jnp.clip( - ratio, - 1.0 - config.system.clip_eps, - 1.0 + config.system.clip_eps, - ) - * gae - ) - loss_actor = -jnp.minimum(loss_actor1, loss_actor2) - loss_actor = loss_actor.mean() - # The seed will be used in the TanhTransformedDistribution: - entropy = actor_policy.entropy(seed=key).mean() - - total_loss_actor = loss_actor - config.system.ent_coef * entropy - return total_loss_actor, (loss_actor, entropy) - - def _critic_loss_fn( - critic_params: FrozenDict, - critic_opt_state: OptState, - traj_batch: PPOTransition, - targets: chex.Array, - ) -> Tuple: - """Calculate the critic loss.""" - # RERUN NETWORK - value = critic_apply_fn(critic_params, traj_batch.obs) - - # CALCULATE VALUE LOSS - value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( - -config.system.clip_eps, config.system.clip_eps - ) - value_losses = jnp.square(value - targets) - value_losses_clipped = jnp.square(value_pred_clipped - targets) - value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() - - critic_total_loss = config.system.vf_coef * value_loss - return critic_total_loss, (value_loss) - - # CALCULATE ACTOR LOSS - key, entropy_key = jax.random.split(key) - actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) - actor_loss_info, actor_grads = actor_grad_fn( - params.actor_params, - opt_states.actor_opt_state, - traj_batch, - advantages, - entropy_key, - ) - - # CALCULATE CRITIC LOSS - critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) - critic_loss_info, critic_grads = critic_grad_fn( - params.critic_params, opt_states.critic_opt_state, traj_batch, 
targets - ) - - # Compute the parallel mean (pmean) over the batch. - # This calculation is inspired by the Anakin architecture demo notebook. - # available at https://tinyurl.com/26tdzs5x - # pmean over devices. - actor_grads, actor_loss_info = jax.lax.pmean( - (actor_grads, actor_loss_info), axis_name="device" #todo: pmean over learner devices not all - ) - - # pmean over devices. - critic_grads, critic_loss_info = jax.lax.pmean( - (critic_grads, critic_loss_info), axis_name="device" - ) - - # UPDATE ACTOR PARAMS AND OPTIMISER STATE - actor_updates, actor_new_opt_state = actor_update_fn( - actor_grads, opt_states.actor_opt_state - ) - actor_new_params = optax.apply_updates(params.actor_params, actor_updates) - - # UPDATE CRITIC PARAMS AND OPTIMISER STATE - critic_updates, critic_new_opt_state = critic_update_fn( - critic_grads, opt_states.critic_opt_state - ) - critic_new_params = optax.apply_updates(params.critic_params, critic_updates) - - # PACK NEW PARAMS AND OPTIMISER STATE - new_params = Params(actor_new_params, critic_new_params) - new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) - # PACK LOSS INFO - total_loss = actor_loss_info[0] + critic_loss_info[0] - value_loss = critic_loss_info[1] - actor_loss = actor_loss_info[1][0] - entropy = actor_loss_info[1][1] - loss_info = { - "total_loss": total_loss, - "value_loss": value_loss, - "actor_loss": actor_loss, - "entropy": entropy, - } - return (new_params, new_opt_state, entropy_key), loss_info - - params, opt_states, traj_batch, advantages, targets, key = update_state - key, shuffle_key, entropy_key = jax.random.split(key, 3) - # SHUFFLE MINIBATCHES - batch_size = config.system.rollout_length * (config.arch.num_envs // len(config.arch.learner_device_ids)) * len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor - permutation = jax.random.permutation(shuffle_key, batch_size) - batch = (traj_batch, advantages, targets) - batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) - shuffled_batch = jax.tree_util.tree_map( - lambda x: jnp.take(x, permutation, axis=0), batch - ) - minibatches = jax.tree_util.tree_map( - lambda x: jnp.reshape(x, [config.system.num_minibatches, -1] + list(x.shape[1:])), - shuffled_batch, - ) - # UPDATE MINIBATCHES - (params, opt_states, entropy_key), loss_info = jax.lax.scan( - _update_minibatch, (params, opt_states, entropy_key), minibatches - ) - - update_state = (params, opt_states, traj_batch, advantages, targets, key) - return update_state, loss_info - - update_state = (params, opt_states, traj_batch, advantages, targets, key) - # UPDATE EPOCHS - update_state, loss_info = jax.lax.scan( - _update_epoch, update_state, None, config.system.ppo_epochs - ) - - params, opt_states, traj_batch, advantages, targets, key = update_state - learner_state = LearnerState(params, opt_states, key, None, None) - metric = traj_batch.info - return learner_state, (metric, loss_info) - - def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: - """Learner function. - - This function represents the learner, it updates the network parameters - by iteratively applying the `_update_step` function for a fixed number of - updates. The `_update_step` function is vectorized over a batch of inputs. - - Args: - learner_state (NamedTuple): - - params (Params): The initial model parameters. - - opt_states (OptStates): The initial optimizer state. 
- - key (chex.PRNGKey): The random number generator state. - - env_state (LogEnvState): The environment state. - - timesteps (TimeStep): The initial timestep in the initial trajectory. - """ - - - learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones) - - return ExperimentOutput( - learner_state=learner_state, - episode_metrics=episode_info, - train_metrics=loss_info, - ) - - return learner_fn - - -def learner_setup( - keys: chex.Array, config: DictConfig, learner_devices: List -) -> Tuple[LearnerFn[LearnerState], Actor, LearnerState]: - """Initialise learner_fn, network, optimiser, environment and states.""" - - #create temporory envoirnments. - env = environments.make_gym_env(config, 1, add_global_state=True) - # Get number of agents and actions. - action_space = env.single_action_space - config.system.num_agents = len(action_space) - config.system.num_actions = action_space[0].n - - # PRNG keys. - key, actor_net_key, critic_net_key = keys - - # Define network and optimiser. - actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=config.system.num_actions - ) - critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) - - actor_network = Actor(torso=actor_torso, action_head=actor_action_head) - critic_network = Critic(torso=critic_torso, centralised_critic= True) - - actor_lr = make_learning_rate(config.system.actor_lr, config) - critic_lr = make_learning_rate(config.system.critic_lr, config) - - actor_optim = optax.chain( - optax.clip_by_global_norm(config.system.max_grad_norm), - optax.adam(actor_lr, eps=1e-5), - ) - critic_optim = optax.chain( - optax.clip_by_global_norm(config.system.max_grad_norm), - optax.adam(critic_lr, eps=1e-5), - ) - - # Initialise observation: Select only obs for a single agent. - obs, info = env.reset() - init_obs = jnp.stack(obs, axis = 1) # (num_envs, num_agents, ...) - init_mask = np.stack(info["actions_mask"]) # (num_envs, num_agents, num_actions) - init_global_obs = np.stack(info["global_obs"]) - init_x = ObservationGlobalState(init_obs, init_mask, init_global_obs) - - # Initialise actor params and optimiser state. - actor_params = actor_network.init(actor_net_key, init_x) - actor_opt_state = actor_optim.init(actor_params) - - # Initialise critic params and optimiser state. - critic_params = critic_network.init(critic_net_key, init_x) - critic_opt_state = critic_optim.init(critic_params) - - # Pack params. - params = Params(actor_params, critic_params) - - # Pack apply and update functions. - apply_fns = (actor_network.apply, critic_network.apply) - update_fns = (actor_optim.update, critic_optim.update) - - # Get batched iterated update and replicate it to pmap it over learner cores. - learn = get_learner_fn(apply_fns, update_fns, config) - learn = jax.pmap(learn, axis_name="device", devices = learner_devices) - - # Load model from checkpoint if specified. - if config.logger.checkpointing.load_model: - loaded_checkpoint = Checkpointer( - model_name=config.logger.system_name, - **config.logger.checkpointing.load_args, # Other checkpoint args - ) - # Restore the learner state from the checkpoint - restored_params, _ = loaded_checkpoint.restore_params(input_params=params) - # Update the params - params = restored_params - - # Define params to be replicated across devices and batches. 
- key, step_keys = jax.random.split(key) - opt_states = OptStates(actor_opt_state, critic_opt_state) - replicate_learner = (params, opt_states, step_keys) - - # Duplicate learner across Learner devices. - replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices) - - # Initialise learner state. - params, opt_states, step_keys = replicate_learner - init_learner_state = LearnerState(params, opt_states, step_keys, None, None) - env.close() - - return learn, apply_fns, init_learner_state - - -def run_experiment(_config: DictConfig) -> float: - """Runs experiment.""" - config = copy.deepcopy(_config) - - devices = jax.devices() - learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] - - # PRNG keys. - key, key_e, actor_net_key, critic_net_key = jax.random.split( - jax.random.PRNGKey(config.system.seed), num=4 - ) - - # Sanity check of config - assert ( - config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments must to be divisible by the number of learners " - - assert ( - int(config.arch.num_envs / len(config.arch.learner_device_ids)) - * config.arch.n_threads_per_executor - % config.system.num_minibatches - == 0 - ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" - - - # Setup learner. - learn, apply_fns , learner_state = learner_setup( - (key ,actor_net_key, critic_net_key), config, learner_devices - ) - - # Setup evaluator. - # One key per device for evaluation. - evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config, add_global_state=True) #todo: make this more generic - - # Calculate total timesteps. - config = sebulba_check_total_timesteps(config) - assert ( - config.system.num_updates > config.arch.num_evaluation - ), "Number of updates per evaluation must be less than total number of updates." - - # Calculate number of updates per evaluation. - config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation) - config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation step if the num_updates is not a multiple of num_evaluation - steps_per_rollout = ( - len(config.arch.executor_device_ids) - * config.arch.n_threads_per_executor - * config.system.rollout_length - * config.arch.num_envs - * config.system.num_updates_per_eval - ) - - # Logger setup - logger = MavaLogger(config) - cfg: Dict = OmegaConf.to_container(config, resolve=True) - cfg["arch"]["devices"] = jax.devices() - pprint(cfg) - - # Set up checkpointer - save_checkpoint = config.logger.checkpointing.save_model - if save_checkpoint: - checkpointer = Checkpointer( - metadata=config, # Save all config as metadata in the checkpoint - model_name=config.logger.system_name, - **config.logger.checkpointing.save_args, # Checkpoint args - ) - - # Executor setup and launch. 
- unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) - params_queues: List = [] - rollout_queues: List = [] - for d_idx, d_id in enumerate( # Loop through each executor device - config.arch.executor_device_ids - ): - # Replicate params per executor device - device_params = jax.device_put(unreplicated_params, devices[d_id]) - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - params_queues.append(queue.Queue(maxsize=1)) - rollout_queues.append(queue.Queue(maxsize=1)) - params_queues[-1].put(device_params) - threading.Thread( - target=rollout, - args=( - jax.device_put(key, devices[d_id]), - config, - rollout_queues[-1], - params_queues[-1], - apply_fns, - learner_devices, - d_id, - ), - ).start() #todo : Use a process instead of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) - - - # Run experiment for the total number of updates. - max_episode_return = jnp.float32(0.0) - best_params = None - for eval_step in range(config.arch.num_evaluation): - training_start_time = time.time() - learner_speeds = [] - rollout_times = [] - - episode_metrics = [] - train_metrics = [] - - # Make sure that the - num_updates_in_eval = config.system.num_updates_per_eval if eval_step != config.arch.num_evaluation - 1 else remaining_updates - for update in range(num_updates_in_eval): - sharded_storages = [] - sharded_next_obss = [] - sharded_next_dones = [] - - rollout_start_time = time.time() - # Loop through each executor device - for d_idx, _ in enumerate(config.arch.executor_device_ids): - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - # Get data from rollout queue - ( - sharded_storage, - sharded_next_obs, - sharded_next_done, - ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() - sharded_storages.append(sharded_storage) - sharded_next_obss.append(sharded_next_obs) - sharded_next_dones.append(sharded_next_done) - - rollout_times.append(time.time() - rollout_start_time) - - - # Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) - sharded_next_obss = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_obss) - sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) - - - learner_start_time = time.time() - learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones) - learner_speeds.append(time.time() - learner_start_time) - - # Stack the metrics - episode_metrics.append(learner_output.episode_metrics) - train_metrics.append(learner_output.train_metrics) - - # Send updated params to executors - unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) - for d_idx, d_id in enumerate(config.arch.executor_device_ids): - device_params = jax.device_put(unreplicated_params, devices[d_id]) - for thread_id in range(config.arch.n_threads_per_executor): - params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( - device_params - ) - - - - # Log the results of the training. 
- elapsed_time = time.time() - training_start_time - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics = jax.tree_map(lambda *x : np.asarray(x), *episode_metrics) - episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time - - # Separately log timesteps, actoring metrics and training metrics. - speed_info = {"total_time" : elapsed_time, "rollout_time" : np.sum(rollout_times), "learner_time" : np.sum(learner_speeds), "timestep" : t} - logger.log(speed_info , t, eval_step, LogEvent.MISC) - if ep_completed: # only log episode metrics if an episode was completed in the rollout. - logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - train_metrics = jax.tree_map(lambda *x : np.asarray(x), *train_metrics) - logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - - # Evaluation on the learner - evaluation_start_timer = time.time() - key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) - - # Log the results of the evaluation. - elapsed_time = time.time() - evaluation_start_timer - episode_return = jnp.mean(episode_metrics["episode_return"]) - - steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) - episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) - - if save_checkpoint: - # Save checkpoint of learner state - checkpointer.save( - timestep=steps_per_rollout * (eval_step + 1), - unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), - episode_return=episode_return, - ) - - if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(learner_output.learner_state.params) - max_episode_return = episode_return - - # Update runner state to continue training. - learner_state = learner_output.learner_state - - # Record the performance for the final evaluation run. - eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) - - # Measure absolute metric. - if config.arch.absolute_metric: - start_time = time.time() - - key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params.actor_params, 1), eval_key) - - elapsed_time = time.time() - start_time - steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) - - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) - - # Stop the logger. - logger.stop() - - return eval_performance - - - -@hydra.main(config_path="../../../configs", config_name="default_ff_mappo_seb.yaml", version_base="1.2") -def hydra_entry_point(cfg: DictConfig) -> float: - """Experiment entry point.""" - # Allow dynamic attributes. - OmegaConf.set_struct(cfg, False) - - # Run experiment. 
- eval_performance = run_experiment(cfg) - print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") - return eval_performance - - -if __name__ == "__main__": - hydra_entry_point() - -#learner_output.episode_metrics.keys() -#dict_keys(['episode_length', 'episode_return']) \ No newline at end of file diff --git a/mava/systems/sebulba/ppo/orig.py b/mava/systems/sebulba/ppo/orig.py deleted file mode 100644 index dde0add30..000000000 --- a/mava/systems/sebulba/ppo/orig.py +++ /dev/null @@ -1,795 +0,0 @@ -# Copyright 2022 InstaDeep Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from mava.utils.sebulba_utils import configure_computation_environment - -configure_computation_environment() # noqa: E402 - -import copy -import queue -import threading -import time -from collections import deque -from typing import Any, Dict, List, Tuple - -import chex -import flax -import hydra -import jax -import jax.numpy as jnp -import numpy as np -import optax -from chex import PRNGKey -from colorama import Fore, Style -from flax.core.frozen_dict import FrozenDict -from omegaconf import DictConfig, OmegaConf -from rich.pretty import pprint - -from mava.evaluator import get_sebulba_ff_evaluator as evaluator_setup -from mava.logger import Logger -from mava.networks import get_networks -from mava.types import ( - ActorApply, - CriticApply, - LearnerState, - OptStates, - Params, -) -from mava.types import PPOTransition as Transition -from mava.types import SebulbaLearnerFn as LearnerFn -from mava.types import SingleDeviceFn -from mava.utils.checkpointing import Checkpointer -from mava.utils.jax import merge_leading_dims -from mava.utils.make_env import make - - -def rollout( # noqa: CCR001 - rng: PRNGKey, - config: DictConfig, - rollout_queue: queue.Queue, - params_queue: queue.Queue, - device_thread_id: int, - apply_fns: Tuple, - logger: Logger, - learner_devices: List, -) -> None: - """Executor rollout loop.""" - # Create envs - envs = make(config)(config.arch.num_envs) # type: ignore - - # Setup - len_executor_device_ids = len(config.arch.executor_device_ids) - t_env = 0 - start_time = time.time() - - # Get the apply functions for the actor and critic networks. - vmap_actor_apply, vmap_critic_apply = apply_fns - - # Define the util functions: select action function and prepare data to share it with learner. 
- @jax.jit - def get_action_and_value( - params: FrozenDict, - observation: Observation, - key: PRNGKey, - ) -> Tuple: - """Get action and value.""" - key, subkey = jax.random.split(key) - - policy = vmap_actor_apply(params.actor_params, observation) - action, logprob = policy.sample_and_log_prob(seed=subkey) - - value = vmap_critic_apply(params.critic_params, observation).squeeze() - return action, logprob, value, key - - @jax.jit - def prepare_data(storage: List[Transition]) -> Transition: - """Prepare data to share with learner.""" - return jax.tree_map( # type: ignore - lambda *xs: jnp.split(jnp.stack(xs), len(learner_devices), axis=1), *storage - ) - - # Define the episode info - env_id = np.arange(config.arch.num_envs) - # Accumulated episode returns - episode_returns = np.zeros((config.arch.num_envs,), dtype=np.float32) - # Final episode returns - returned_episode_returns = np.zeros((config.arch.num_envs,), dtype=np.float32) - # Accumulated episode lengths - episode_lengths = np.zeros((config.arch.num_envs,), dtype=np.float32) - # Final episode lengths - returned_episode_lengths = np.zeros((config.arch.num_envs,), dtype=np.float32) - - # Define the data structure - params_queue_get_time: deque = deque(maxlen=10) - rollout_time: deque = deque(maxlen=10) - rollout_queue_put_time: deque = deque(maxlen=10) - - # Reset envs - next_obs, infos = envs.reset() - next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) - - # Loop till the learner has finished training - for update in range(1, config.system.num_updates + 2): - # Setup - env_recv_time: float = 0 - inference_time: float = 0 - storage_time: float = 0 - env_send_time: float = 0 - - # Get the latest parameters from the learner - params_queue_get_time_start = time.time() - if config.arch.concurrency: - if update != 2: - params = params_queue.get() - params.network_params["params"]["Dense_0"]["kernel"].block_until_ready() - else: - params = params_queue.get() - params_queue_get_time.append(time.time() - params_queue_get_time_start) - - # Rollout - rollout_time_start = time.time() - storage: List = [] - # Loop over the rollout length - for _ in range(0, config.system.rollout_length): - # Get previous step info - cached_next_obs = next_obs - cached_next_dones = next_dones - cashed_action_mask = np.stack(infos["actions_mask"]) - - # Increment current timestep - t_env += ( - config.arch.n_threads_per_executor * len_executor_device_ids * config.arch.num_envs - ) - - # Get action and value - inference_time_start = time.time() - ( - action, - logprob, - value, - rng, - ) = get_action_and_value(params, Observation(cached_next_obs, cashed_action_mask), rng) - inference_time += time.time() - inference_time_start - - # Step the environment - env_send_time_start = time.time() - cpu_action = np.array(action) - next_obs, next_reward, terminated, truncated, infos = envs.step(cpu_action) - next_done = terminated + truncated - next_dones = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), - (next_done), - ) - - # Append data to storage - env_send_time += time.time() - env_send_time_start - storage_time_start = time.time() - storage.append( - Transition( - done=cached_next_dones, - action=action, - value=value, - reward=next_reward, - log_prob=logprob, - obs=cached_next_obs, - info=np.stack(infos["actions_mask"]), # Add action mask to info - ) - ) - storage_time += time.time() - storage_time_start - - # Update episode info 
---------------------------------------------------------------------------------------------------------- this is kinda cringe? - episode_returns[env_id] += np.mean(next_reward, axis = 1) - returned_episode_returns[env_id] = np.where( - next_done, - episode_returns[env_id], - returned_episode_returns[env_id], - ) - episode_returns[env_id] *= (1 - next_done) * (1 - truncated) - episode_lengths[env_id] += 1 - returned_episode_lengths[env_id] = np.where( - next_done, - episode_lengths[env_id], - returned_episode_lengths[env_id], - ) - episode_lengths[env_id] *= (1 - next_done) * (1 - truncated) - rollout_time.append(time.time() - rollout_time_start) - - # Prepare data to share with learner - partitioned_storage = prepare_data(storage) - sharded_storage = Transition( - *list( # noqa: C417 - map( - lambda x: jax.device_put_sharded(x, devices=learner_devices), # type: ignore - partitioned_storage, - ) - ) - ) - sharded_next_obs = jax.device_put_sharded( - np.split(next_obs, len(learner_devices)), devices=learner_devices - ) - sharded_next_done = jax.device_put_sharded( - np.split(next_dones, len(learner_devices)), devices=learner_devices - ) - sharded_next_action_mask = jax.device_put_sharded( - np.split(np.stack(infos["actions_mask"]), len(learner_devices)), devices=learner_devices - ) - payload = ( - t_env, - sharded_storage, - sharded_next_obs, - sharded_next_done, - sharded_next_action_mask, - np.mean(params_queue_get_time), - ) - - # Put data in the rollout queue to share it with the learner - rollout_queue_put_time_start = time.time() - rollout_queue.put(payload) - rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) - - if (update % config.arch.log_frequency == 0) or (config.system.num_updates + 1 == update): - # Log info - logger.log_executor_metrics( - t_env=t_env, - metrics={ - "episodes_info": { - "episode_return": returned_episode_returns, - "episode_length": returned_episode_lengths, - "steps_per_second": int(t_env / (time.time() - start_time)), - }, - "speed_info": { - "rollout_time": np.mean(rollout_time), - }, - "queue_info": { - "params_queue_get_time": np.mean(params_queue_get_time), - "env_recv_time": env_recv_time, - "inference_time": inference_time, - "storage_time": storage_time, - "env_send_time": env_send_time, - "rollout_queue_put_time": np.mean(rollout_queue_put_time), - }, - }, - device_thread_id=device_thread_id, - ) - - -def get_learner_fn( - apply_fns: Tuple[ActorApply, CriticApply], - update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], - config: DictConfig, -) -> LearnerFn: - """Get the learner function.""" - # Get apply and update functions for actor and critic networks. 
- actor_apply_fn, critic_apply_fn = apply_fns - actor_update_fn, critic_update_fn = update_fns - - def single_device_update( - agents_state: LearnerState, - traj_batch: Transition, - last_observation: Observation, - rng: PRNGKey, - ) -> Tuple[LearnerState, chex.PRNGKey, Tuple]: - params, opt_states, _, _, _ = agents_state - - def _calculate_gae( - traj_batch: Transition, last_val: chex.Array - ) -> Tuple[chex.Array, chex.Array]: - """Calculate the GAE.""" - - def _get_advantages(gae_and_next_value: Tuple, transition: Transition) -> Tuple: - """Calculate the GAE for a single transition.""" - gae, next_value = gae_and_next_value - done, value, reward = ( - transition.done, - transition.value, - transition.reward, - ) - gamma = config.system.gamma - delta = reward + gamma * next_value * (1 - done) - value - gae = delta + gamma * config.system.gae_lambda * (1 - done) * gae - return (gae, value), gae - - _, advantages = jax.lax.scan( - _get_advantages, - (jnp.zeros_like(last_val), last_val), - traj_batch, - reverse=True, - unroll=16, - ) - return advantages, advantages + traj_batch.value - - # Calculate GAE - last_val = critic_apply_fn(params.critic_params, last_observation) - advantages, targets = _calculate_gae(traj_batch, last_val) - - def _update_epoch(update_state: Tuple, _: Any) -> Tuple: - """Update the network for a single epoch.""" - - def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: - """Update the network for a single minibatch.""" - - # UNPACK TRAIN STATE AND BATCH INFO - params, opt_states = train_state - traj_batch, advantages, targets = batch_info - - def _actor_loss_fn( - actor_params: FrozenDict, - actor_opt_state: OptStates, - traj_batch: Transition, - gae: chex.Array, - ) -> Tuple: - """Calculate the actor loss.""" - # RERUN NETWORK - actor_policy = actor_apply_fn(actor_params, traj_batch.obs) - log_prob = actor_policy.log_prob(traj_batch.action) - - # CALCULATE ACTOR LOSS - ratio = jnp.exp(log_prob - traj_batch.log_prob) - gae = (gae - gae.mean()) / (gae.std() + 1e-8) - loss_actor1 = ratio * gae - loss_actor2 = ( - jnp.clip( - ratio, - 1.0 - config.system.clip_eps, - 1.0 + config.system.clip_eps, - ) - * gae - ) - loss_actor = -jnp.minimum(loss_actor1, loss_actor2) - loss_actor = loss_actor.mean() - entropy = actor_policy.entropy().mean() - - total_loss_actor = loss_actor - config.system.ent_coef * entropy - return total_loss_actor, (loss_actor, entropy) - - def _critic_loss_fn( - critic_params: FrozenDict, - critic_opt_state: OptStates, - traj_batch: Transition, - targets: chex.Array, - ) -> Tuple: - """Calculate the critic loss.""" - # RERUN NETWORK - value = critic_apply_fn(critic_params, traj_batch.obs) - - # CALCULATE VALUE LOSS - value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( - -config.system.clip_eps, config.system.clip_eps - ) - value_losses = jnp.square(value - targets) - value_losses_clipped = jnp.square(value_pred_clipped - targets) - value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() - - critic_total_loss = config.system.vf_coef * value_loss - return critic_total_loss, (value_loss) - - # CALCULATE ACTOR LOSS - actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) - actor_loss_info, actor_grads = actor_grad_fn( - params.actor_params, opt_states.actor_opt_state, traj_batch, advantages - ) - - # CALCULATE CRITIC LOSS - critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) - critic_loss_info, critic_grads = critic_grad_fn( - params.critic_params, 
opt_states.critic_opt_state, traj_batch, targets - ) - - # Compute the parallel mean (pmean) over the learner devices. - actor_grads, actor_loss_info = jax.lax.pmean( - (actor_grads, actor_loss_info), axis_name="local_devices" - ) - critic_grads, critic_loss_info = jax.lax.pmean( - (critic_grads, critic_loss_info), axis_name="local_devices" - ) - - # UPDATE ACTOR PARAMS AND OPTIMISER STATE - actor_updates, actor_new_opt_state = actor_update_fn( - actor_grads, opt_states.actor_opt_state - ) - actor_new_params = optax.apply_updates(params.actor_params, actor_updates) - - # UPDATE CRITIC PARAMS AND OPTIMISER STATE - critic_updates, critic_new_opt_state = critic_update_fn( - critic_grads, opt_states.critic_opt_state - ) - critic_new_params = optax.apply_updates(params.critic_params, critic_updates) - - # PACK NEW PARAMS AND OPTIMISER STATE - new_params = Params(actor_new_params, critic_new_params) - new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) - - # PACK LOSS INFO - total_loss = actor_loss_info[0] + critic_loss_info[0] - value_loss = critic_loss_info[1] - actor_loss = actor_loss_info[1][0] - entropy = actor_loss_info[1][1] - loss_info = (total_loss, value_loss, actor_loss, entropy) - - return (new_params, new_opt_state), loss_info - - params, opt_states, traj_batch, advantages, targets, rng = update_state - rng, shuffle_rng = jax.random.split(rng) - - # SHUFFLE MINIBATCHES - batch_size = config.system.rollout_length * config.arch.num_envs - permutation = jax.random.permutation(shuffle_rng, batch_size) - batch = (traj_batch, advantages, targets) - batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) - shuffled_batch = jax.tree_util.tree_map( - lambda x: jnp.take(x, permutation, axis=0), batch - ) - minibatches = jax.tree_util.tree_map( - lambda x: jnp.reshape(x, [config.system.num_minibatches, -1] + list(x.shape[1:])), - shuffled_batch, - ) - - # UPDATE MINIBATCHES - (params, opt_states), loss_info = jax.lax.scan( - _update_minibatch, (params, opt_states), minibatches - ) - - update_state = (params, opt_states, traj_batch, advantages, targets, rng) - return update_state, loss_info - - update_state = (params, opt_states, traj_batch, advantages, targets, rng) - - # UPDATE EPOCHS - update_state, loss_info = jax.lax.scan( - _update_epoch, update_state, None, config.system.ppo_epochs - ) - - params, opt_states, traj_batch, advantages, targets, rng = update_state - learner_state = agents_state._replace(params=params, opt_states=opt_states) - return learner_state, rng, loss_info - - def learner_fn( - agents_state: LearnerState, - sharded_storages: List, - sharded_next_obs: List, - sharded_next_done: List, - sharded_next_action_mask: List, - key: chex.PRNGKey, - ) -> Tuple: - """Single device update.""" - # Horizontal stack all the data from different devices - traj_batch = jax.tree_map(lambda *x: jnp.hstack(x), *sharded_storages) - traj_batch = traj_batch._replace(obs=Observation(traj_batch.obs, traj_batch.info)) - - # Get last observation - last_obs = jnp.concatenate(sharded_next_obs) - last_action_mask = jnp.concatenate(sharded_next_action_mask) - last_observation = Observation(last_obs, last_action_mask) - - # Update learner - agents_state, key, (total_loss, value_loss, actor_loss, entropy) = single_device_update( - agents_state, traj_batch, last_observation, key - ) - - # Pack loss info - loss_info = { - "total_loss": total_loss, - "loss_actor": actor_loss, - "value_loss": value_loss, - "entropy": entropy, - } - return agents_state, key, loss_info - - 
return learner_fn - - -def learner_setup( - rngs: chex.Array, config: DictConfig, learner_devices: List -) -> Tuple[SingleDeviceFn, LearnerState, Tuple[ActorApply, ActorApply]]: - """Initialise learner_fn, network, optimiser, environment and states.""" - # Get number of actions and agents. - dummy_envs = make(config)( # type: ignore - config.arch.num_envs # Create dummy_envs to get observation and action spaces - ) - config.system.num_agents = dummy_envs.single_observation_space.shape[0] - config.system.num_actions = int(dummy_envs.single_action_space.nvec[0]) - - # PRNG keys. - actor_net_key, critic_net_key = rngs - - # Define network and optimiser. - actor_network, critic_network = get_networks( - config=config, network="feedforward", centralised_critic=False - ) - actor_optim = optax.chain( - optax.clip_by_global_norm(config.system.max_grad_norm), - optax.adam(config.system.actor_lr, eps=1e-5), - ) - critic_optim = optax.chain( - optax.clip_by_global_norm(config.system.max_grad_norm), - optax.adam(config.system.critic_lr, eps=1e-5), - ) - - # Initialise observation: Select only obs for a single agent. - init_obs = np.array([dummy_envs.single_observation_space.sample()[0]]) - init_action_mask = np.ones((1, config.system.num_actions)) - init_x = Observation(init_obs, init_action_mask) - - # Initialise actor params and optimiser state. - actor_params = actor_network.init(actor_net_key, init_x) - actor_opt_state = actor_optim.init(actor_params) - - # Initialise critic params and optimiser state. - critic_params = critic_network.init(critic_net_key, init_x) - critic_opt_state = critic_optim.init(critic_params) - - # Vmap network apply function over number of agents. - vmapped_actor_network_apply_fn = jax.vmap( - actor_network.apply, - in_axes=(None, Observation(1, 1, None)), - out_axes=(1), - ) - vmapped_critic_network_apply_fn = jax.vmap( - critic_network.apply, - in_axes=(None, Observation(1, 1, None)), - out_axes=(1), - ) - - # Pack apply and update functions. - apply_fns = (vmapped_actor_network_apply_fn, vmapped_critic_network_apply_fn) - update_fns = (actor_optim.update, critic_optim.update) - - # Define agents state - agents_state = LearnerState( - params=Params( - actor_params=actor_params, - critic_params=critic_params, - ), - opt_states=OptStates( - actor_opt_state=actor_opt_state, - critic_opt_state=critic_opt_state, - ), - ) - # Replicate agents state per learner device - agents_state = flax.jax_utils.replicate(agents_state, devices=learner_devices) - - # Get Learner function: pmap over learner devices. - single_device_update = get_learner_fn(apply_fns, update_fns, config) - multi_device_update = jax.pmap( - single_device_update, - axis_name="local_devices", - devices=learner_devices, - ) - - # Close dummy envs. - dummy_envs.close() - - return multi_device_update, agents_state, apply_fns - - -def run_experiment(_config: DictConfig) -> None: # noqa: CCR001 - """Runs experiment.""" - config = copy.deepcopy(_config) - - # Setup device distribution. - local_devices = jax.local_devices() #why are we using local devices insted of devices? ------------------------------------------------------------------------------------------------------------------------------------ define a ratio insted of the devices to use? - learner_devices = [local_devices[d_id] for d_id in config.arch.learner_device_ids] - - # PRNG keys. 
-    rng, rng_e, actor_net_key, critic_net_key = jax.random.split(
-        jax.random.PRNGKey(config.system.seed), num=4
-    )
-    learner_keys = jax.device_put_replicated(rng, learner_devices)
-
-    # Sanity check of config
-    assert (
-        config.arch.num_envs % len(config.arch.learner_device_ids) == 0
-    ), "local_num_envs must be divisible by len(learner_device_ids)"
-    # Each actor thread has to give an equal number of trajectories to each learner device? Shouldn't each actor thread have a designated set of N learners? If we have fewer actor threads than learners, each actor divides based on num_env and gives a share to N learners; presumably each actor gives to all of the learners to lessen the management overhead?
-    # This deviates from the paper?
-    assert (
-        int(config.arch.num_envs / len(config.arch.learner_device_ids))
-        * config.arch.n_threads_per_executor
-        % config.system.num_minibatches
-        == 0
-    ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches"  # this one makes sense but the assertion is a bit off?
-
-    # Setup learner.
-    (
-        multi_device_update,
-        agents_state,
-        apply_fns,
-    ) = learner_setup((actor_net_key, critic_net_key), config, learner_devices)
-
-    # Setup evaluator.
-    eval_envs = make(config)(config.arch.num_eval_episodes)  # type: ignore
-    evaluator = evaluator_setup(eval_envs=eval_envs, apply_fn=apply_fns[0], config=config)
-
-    # Calculate total timesteps.
-    batch_size = int(
-        config.arch.num_envs
-        * config.system.rollout_length
-        * config.arch.n_threads_per_executor
-        * len(config.arch.executor_device_ids)
-    )
-    config.system.total_timesteps = config.system.num_updates * batch_size
-
-    # Setup logger.
-    config.arch.log_frequency = config.system.num_updates // config.arch.num_evaluation
-    logger = Logger(config)
-    cfg_dict: Dict = OmegaConf.to_container(config, resolve=True)
-    pprint(cfg_dict)
-
-    # Set up checkpointer
-    save_checkpoint = config.logger.checkpointing.save_model
-    if save_checkpoint:
-        checkpointer = Checkpointer(
-            metadata=cfg_dict,  # Save all config as metadata in the checkpoint
-            model_name=config.logger.system_name,
-            **config.logger.checkpointing.save_args,  # Checkpoint args
-        )
-
-    if config.logger.checkpointing.load_model:
-        print(
-            f"{Fore.RED}{Style.BRIGHT}Loading checkpoint is not supported\
-            for sebulba architecture yet{Style.RESET_ALL}"
-        )
-
-    # Executor setup and launch.
-    unreplicated_params = flax.jax_utils.unreplicate(agents_state.params)
-    params_queues: List = []
-    rollout_queues: List = []
-    for d_idx, d_id in enumerate(  # Loop through each executor device
-        config.arch.executor_device_ids
-    ):
-        # Replicate params per executor device
-        device_params = jax.device_put(unreplicated_params, local_devices[d_id])
-        # Loop through each executor thread
-        for thread_id in range(config.arch.n_threads_per_executor):
-            params_queues.append(queue.Queue(maxsize=1))
-            rollout_queues.append(queue.Queue(maxsize=1))
-            params_queues[-1].put(device_params)
-            threading.Thread(
-                target=rollout,
-                args=(
-                    jax.device_put(rng, local_devices[d_id]),
-                    config,
-                    rollout_queues[-1],
-                    params_queues[-1],
-                    d_idx * config.arch.n_threads_per_executor + thread_id,
-                    apply_fns,
-                    logger,
-                    learner_devices,
-                ),
-            ).start()
-
-    # Run experiment for the total number of updates.
- rollout_queue_get_time: deque = deque(maxlen=10) - data_transfer_time: deque = deque(maxlen=10) - trainer_update_number = 0 - max_episode_return = jnp.float32(0.0) - best_params = None - while True: - trainer_update_number += 1 - rollout_queue_get_time_start = time.time() - sharded_storages = [] - sharded_next_obss = [] - sharded_next_dones = [] - sharded_next_action_masks = [] - - # Loop through each executor device - for d_idx, _ in enumerate(config.arch.executor_device_ids): - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - # Get data from rollout queue - ( - t_env, - sharded_storage, - sharded_next_obs, - sharded_next_done, - sharded_next_action_mask, - avg_params_queue_get_time, - ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() - sharded_storages.append(sharded_storage) - sharded_next_obss.append(sharded_next_obs) - sharded_next_dones.append(sharded_next_done) - sharded_next_action_masks.append(sharded_next_action_mask) - - rollout_queue_get_time.append(time.time() - rollout_queue_get_time_start) - training_time_start = time.time() - - # Update learner - (agents_state, learner_keys, loss_info) = multi_device_update( # type: ignore - agents_state, - sharded_storages, - sharded_next_obss, - sharded_next_dones, - sharded_next_action_masks, - learner_keys, - ) - - # Send updated params to executors - unreplicated_params = flax.jax_utils.unreplicate(agents_state.params) - for d_idx, d_id in enumerate(config.arch.executor_device_ids): - device_params = jax.device_put(unreplicated_params, local_devices[d_id]) - for thread_id in range(config.arch.n_threads_per_executor): - params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( - device_params - ) - - if trainer_update_number % config.arch.log_frequency == 0: - # Logging training info - logger.log_trainer_metrics( - experiment_output={ - "loss_info": loss_info, - "queue_info": { - "rollout_queue_get_time": np.mean(rollout_queue_get_time), - "data_transfer_time": np.mean(data_transfer_time), - "rollout_params_queue_get_time_diff": np.mean(rollout_queue_get_time) - - avg_params_queue_get_time, - "rollout_queue_size": rollout_queues[0].qsize(), - "params_queue_size": params_queues[0].qsize(), - }, - "speed_info": { - "training_time": time.time() - training_time_start, - "trainer_update_number": trainer_update_number, - }, - }, - t_env=t_env, - ) - - # Evaluation - rng_e, _ = jax.random.split(rng_e) - evaluator_output = evaluator(params=unreplicated_params, rng=rng_e) - # Log the results of the evaluation. - episode_return = logger.log_evaluator_metrics( - t_env=t_env, - metrics=evaluator_output, - eval_step=trainer_update_number, - ) - - if save_checkpoint: - # Save checkpoint of learner state - checkpointer.save( - timestep=t_env, - unreplicated_learner_state=flax.jax_utils.unreplicate(agents_state), - episode_return=episode_return, - ) - - if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(unreplicated_params) - max_episode_return = episode_return - - # Check if training is finished - if trainer_update_number >= config.system.num_updates: - rng_e, _ = jax.random.split(rng_e) - # Measure absolute metric - evaluator_output = evaluator(params=best_params, rng=rng_e, eval_multiplier=10) - # Log the results of the evaluation. 
- logger.log_evaluator_metrics( - t_env=t_env, - metrics=evaluator_output, - eval_step=trainer_update_number + 1, - absolute_metric=True, - ) - break - - -@hydra.main(config_path="../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") -def hydra_entry_point(cfg: DictConfig) -> None: - """Experiment entry point.""" - - # Run experiment. - run_experiment(cfg) - - print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") - - -if __name__ == "__main__": - hydra_entry_point() \ No newline at end of file diff --git a/mava/systems/sebulba/ppo/rec_ippo.py b/mava/systems/sebulba/ppo/rec_ippo.py deleted file mode 100644 index 6e204fb21..000000000 --- a/mava/systems/sebulba/ppo/rec_ippo.py +++ /dev/null @@ -1,850 +0,0 @@ -# Copyright 2022 InstaDeep Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import time -from typing import Any, Dict, Tuple, List -import threading -import chex -import flax -import hydra -import jax -import jax.debug -import jax.numpy as jnp -import numpy as np -import optax -import queue -from collections import deque -from colorama import Fore, Style -from flax.core.frozen_dict import FrozenDict -from omegaconf import DictConfig, OmegaConf -from optax._src.base import OptState -from rich.pretty import pprint - -from mava.evaluator import make_sebulba_eval_fns as make_eval_fns -from mava.networks import RecurrentActor as Actor -from mava.networks import RecurrentValueNet as Critic -from mava.networks import ScannedRNN -from mava.systems.anakin.ppo.types import ( - HiddenStates, - OptStates, - Params, - RNNLearnerState, - RNNPPOTransition, -) -from mava.types import ExperimentOutput, LearnerFn, RecActorApply, RecCriticApply, RNNObservation, Observation -from mava.utils import make_env as environments -from mava.utils.checkpointing import Checkpointer -from mava.utils.jax_utils import ( - merge_leading_dims, - unreplicate_batch_dim, - unreplicate_n_dims, -) -from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import sebulba_check_total_timesteps -from mava.utils.training import make_learning_rate -from mava.wrappers.episode_metrics import get_final_step_metrics - - -def rollout( - key: chex.PRNGKey, - config: DictConfig, - rollout_queue: queue.Queue, - params_queue: queue.Queue, - apply_fns: Tuple, - learner_devices: List, - actor_device_id : int, - init_hstates : HiddenStates): - - #setup - - env = environments.make_gym_env(config, config.arch.num_envs) - current_actor_device = jax.devices()[actor_device_id] - actor_apply_fn, critic_apply_fn = apply_fns - - # Define the util functions: select action function and prepare data to share it with learner. 
- @jax.jit - def get_action_and_value( - params: FrozenDict, - observation: RNNObservation, - last_hstates : HiddenStates, - key: chex.PRNGKey, - ) -> Tuple: - """Get action and value.""" - key, subkey = jax.random.split(key) - - policy_hidden_state, actor_policy = actor_apply_fn(params.actor_params, last_hstates.policy_hidden_state, observation) - action = actor_policy.sample(seed=subkey) - log_prob = actor_policy.log_prob(action) - - critic_hidden_state, value = critic_apply_fn(params.critic_params, last_hstates.critic_hidden_state, observation) - hastates = HiddenStates(policy_hidden_state, critic_hidden_state) - return action, log_prob, value, key, hastates - - # Define queues to track time - params_queue_get_time: deque = deque(maxlen=1) - rollout_time: deque = deque(maxlen=1) - rollout_queue_put_time: deque = deque(maxlen=1) - - next_obs , info = env.reset() - next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) - next_hstates = init_hstates - move_to_device = lambda x : jax.device_put(x, device = current_actor_device) - - # Loop till the learner has finished training - for update in range(config.system.num_updates): - inference_time: float = 0 - storage_time: float = 0 - env_send_time: float = 0 - - # Get the latest parameters from the learner - params_queue_get_time_start = time.time() - params = params_queue.get() - params_queue_get_time.append(time.time() - params_queue_get_time_start) - - # Rollout - rollout_time_start = time.time() - storage: List = [] - - # Loop over the rollout length - for _ in range(0, config.system.rollout_length): - - # Cached for transition - cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) # (num_envs, num_agents, ...) - cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) - cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) - - # Add the sequence_len dim - cached_next_obs, cached_next_dones, cashed_action_mask = jax.tree_map(lambda x: x[jnp.newaxis, : ], (cached_next_obs, cached_next_dones, cashed_action_mask)) - - full_observation = Observation(cached_next_obs, cashed_action_mask) - full_observation_dones = (full_observation, cached_next_dones) - cashed_next_hstate = move_to_device(next_hstates) - # Get action and value - inference_time_start = time.time() - ( - action, - log_prob, - value, - key, - next_hstates - ) = get_action_and_value(params, full_observation_dones, cashed_next_hstate, key) - - - # Step the environment - inference_time += time.time() - inference_time_start - env_send_time_start = time.time() - cpu_action = jax.device_get(action) - next_obs, next_reward, terminated, truncated, info = env.step(cpu_action[0].swapaxes(0,1)) # (num_env, num_agents) --> (num_agents, num_env) - env_send_time += time.time() - env_send_time_start - - # Prepare the data - storage_time_start = time.time() - next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics - - # Append data to storage - storage.append( - RNNPPOTransition( - done=cached_next_dones[0], - action=action[0], - value=value[0], - reward=next_reward, - log_prob=log_prob[0], - obs=Observation(cached_next_obs[0], cashed_action_mask[0]), - hstates=cashed_next_hstate, - info=metrics, - ) - ) - storage_time += time.time() - storage_time_start - rollout_time.append(time.time() - rollout_time_start) - - parse_timer = time.time() - - # Prepare data to share with learner - 
#[PPOTransition() * rollout_len] --> PPOTransition[done = (rollout_len, num_envs, num_agents), action = (rollout_len, num_envs, num_agents, num_actions), ...] - stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) - - # Split the arrays over the different learner_devices on the num_envs axis - shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) - - sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) - - # (num_learner_devices, num_envs, num_agents, ...) - sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) - sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) - sharded_next_done = shard_split_payload(next_dones, 0) - sharded_next_hstate = jax.tree_map( lambda x: shard_split_payload(x,0), next_hstates) - - # Pack the obs and action mask - payload_obs_dones = (Observation(sharded_next_obs, sharded_next_action_mask), cached_next_dones) - - # For debugging - speed_info = { - "rollout_time": np.mean(rollout_time), - "params_queue_get_time": np.mean(params_queue_get_time), - "action_inference": inference_time, - "storage_time": storage_time, - "env_step_time": env_send_time, - "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, - "parse_time" : time.time() - parse_timer, - } - #print(speed_info) - - payload = ( - sharded_storage, - payload_obs_dones, - sharded_next_done, - sharded_next_hstate - ) - - # Put data in the rollout queue to share it with the learner - rollout_queue_put_time_start = time.time() - rollout_queue.put(payload) - rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) - - -def get_learner_fn( - apply_fns: Tuple[ RecActorApply, RecCriticApply], - update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], - config: DictConfig, -) -> LearnerFn[RNNLearnerState]: - """Get the learner function.""" - - # Get apply and update functions for actor and critic networks. - actor_apply_fn, critic_apply_fn = apply_fns - actor_update_fn, critic_update_fn = update_fns - - def _update_step(learner_state: RNNLearnerState, traj_batch : RNNPPOTransition, last_obs: RNNObservation, last_dones : chex.Array, last_hstate : HiddenStates) -> Tuple[RNNLearnerState, Tuple]: - """A single update of the network. - - This function steps the environment and records the trajectory batch for - training. It then calculates advantages and targets based on the recorded - trajectory and updates the actor and critic networks based on the calculated - losses. - - Args: - learner_state (NamedTuple): - - params (Params): The current model parameters. - - opt_states (OptStates): The current optimizer states. - - key (PRNGKey): The random number generator state. - - env_state (State): The environment state. - - last_timestep (TimeStep): The last timestep in the current trajectory. - _ (Any): The current metrics info. 
- """ - - def _calculate_gae( #todo: lake sure this is appropriate - traj_batch: RNNPPOTransition, last_val: chex.Array, last_done: chex.Array - ) -> Tuple[chex.Array, chex.Array]: - def _get_advantages( - carry: Tuple[chex.Array, chex.Array, chex.Array], transition: RNNPPOTransition - ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: - gae, next_value, next_done = carry - done, value, reward = transition.done, transition.value, transition.reward - gamma = config.system.gamma - delta = reward + gamma * next_value * (1 - next_done) - value - gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae - return (gae, value, done), gae - - _, advantages = jax.lax.scan( - _get_advantages, - (jnp.zeros_like(last_val), last_val, last_done), - traj_batch, - reverse=True, - unroll=16, - ) - return advantages, advantages + traj_batch.value - - # CALCULATE ADVANTAGE - params, opt_states, key, _, _, _, _ = learner_state - last_obs = jax.tree_map(lambda x: x[jnp.newaxis, : ], last_obs) - last_dones = last_dones[jnp.newaxis, :] - - - _, last_val = critic_apply_fn(params.critic_params, last_hstate.critic_hidden_state, last_obs) - - advantages, targets = _calculate_gae(traj_batch, last_val[0], last_dones[0]) - - def _update_epoch(update_state: Tuple, _: Any) -> Tuple: - """Update the network for a single epoch.""" - - def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: - """Update the network for a single minibatch.""" - - # UNPACK TRAIN STATE AND BATCH INFO - params, opt_states, key = train_state - traj_batch, advantages, targets = batch_info - - def _actor_loss_fn( - actor_params: FrozenDict, - actor_opt_state: OptState, - traj_batch: RNNPPOTransition, - gae: chex.Array, - key: chex.PRNGKey, - ) -> Tuple: - """Calculate the actor loss.""" - # RERUN NETWORK - - obs_and_done = (traj_batch.obs, traj_batch.done) - _, actor_policy = actor_apply_fn( - actor_params, traj_batch.hstates.policy_hidden_state[0], obs_and_done - ) - log_prob = actor_policy.log_prob(traj_batch.action) - - ratio = jnp.exp(log_prob - traj_batch.log_prob) - gae = (gae - gae.mean()) / (gae.std() + 1e-8) - loss_actor1 = ratio * gae - loss_actor2 = ( - jnp.clip( - ratio, - 1.0 - config.system.clip_eps, - 1.0 + config.system.clip_eps, - ) - * gae - ) - loss_actor = -jnp.minimum(loss_actor1, loss_actor2) - loss_actor = loss_actor.mean() - # The seed will be used in the TanhTransformedDistribution: - entropy = actor_policy.entropy(seed=key).mean() - - total_loss = loss_actor - config.system.ent_coef * entropy - return total_loss, (loss_actor, entropy) - - def _critic_loss_fn( - critic_params: FrozenDict, - critic_opt_state: OptState, - traj_batch: RNNPPOTransition, - targets: chex.Array, - ) -> Tuple: - """Calculate the critic loss.""" - # RERUN NETWORK - obs_and_done = (traj_batch.obs, traj_batch.done) - _, value = critic_apply_fn( - critic_params, traj_batch.hstates.critic_hidden_state[0], obs_and_done - ) - - # CALCULATE VALUE LOSS - value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( - -config.system.clip_eps, config.system.clip_eps - ) - value_losses = jnp.square(value - targets) - value_losses_clipped = jnp.square(value_pred_clipped - targets) - value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() - - total_loss = config.system.vf_coef * value_loss - return total_loss, (value_loss) - - # CALCULATE ACTOR LOSS - key, entropy_key = jax.random.split(key) - actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) - actor_loss_info, actor_grads = 
actor_grad_fn( - params.actor_params, - opt_states.actor_opt_state, - traj_batch, - advantages, - entropy_key, - ) - - # CALCULATE CRITIC LOSS - critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) - critic_loss_info, critic_grads = critic_grad_fn( - params.critic_params, opt_states.critic_opt_state, traj_batch, targets - ) - - # Compute the parallel mean (pmean) over the batch. - # This calculation is inspired by the Anakin architecture demo notebook. - # available at https://tinyurl.com/26tdzs5x - # pmean over devices. - actor_grads, actor_loss_info = jax.lax.pmean( - (actor_grads, actor_loss_info), axis_name="device" - ) - # pmean over devices. - critic_grads, critic_loss_info = jax.lax.pmean( - (critic_grads, critic_loss_info), axis_name="device" - ) - - # UPDATE ACTOR PARAMS AND OPTIMISER STATE - actor_updates, actor_new_opt_state = actor_update_fn( - actor_grads, opt_states.actor_opt_state - ) - actor_new_params = optax.apply_updates(params.actor_params, actor_updates) - - # UPDATE CRITIC PARAMS AND OPTIMISER STATE - critic_updates, critic_new_opt_state = critic_update_fn( - critic_grads, opt_states.critic_opt_state - ) - critic_new_params = optax.apply_updates(params.critic_params, critic_updates) - - new_params = Params(actor_new_params, critic_new_params) - new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) - - # PACK LOSS INFO - total_loss = actor_loss_info[0] + critic_loss_info[0] - value_loss = critic_loss_info[1] - actor_loss = actor_loss_info[1][0] - entropy = actor_loss_info[1][1] - loss_info = { - "total_loss": total_loss, - "value_loss": value_loss, - "actor_loss": actor_loss, - "entropy": entropy, - } - - return (new_params, new_opt_state, entropy_key), loss_info - - params, opt_states, traj_batch, advantages, targets, key = update_state - key, shuffle_key, entropy_key = jax.random.split(key, 3) - - # SHUFFLE MINIBATCHES - batch = (traj_batch, advantages, targets) - num_recurrent_chunks = ( - config.system.rollout_length // config.system.recurrent_chunk_size - ) - batch = jax.tree_util.tree_map( - lambda x: x.reshape( - config.system.recurrent_chunk_size, - config.arch.num_envs * num_recurrent_chunks, - *x.shape[2:], - ), - batch, - ) - permutation = jax.random.permutation( - shuffle_key, config.arch.num_envs * num_recurrent_chunks - ) - shuffled_batch = jax.tree_util.tree_map( - lambda x: jnp.take(x, permutation, axis=1), batch - ) - reshaped_batch = jax.tree_util.tree_map( - lambda x: jnp.reshape( - x, (x.shape[0], config.system.num_minibatches, -1, *x.shape[2:]) - ), - shuffled_batch, - ) - minibatches = jax.tree_util.tree_map(lambda x: jnp.swapaxes(x, 1, 0), reshaped_batch) - - # UPDATE MINIBATCHES - (params, opt_states, entropy_key), loss_info = jax.lax.scan( - _update_minibatch, (params, opt_states, entropy_key), minibatches - ) - - update_state = ( - params, - opt_states, - traj_batch, - advantages, - targets, - key, - ) - return update_state, loss_info - - update_state = (params, opt_states, traj_batch, advantages, targets, key) - # UPDATE EPOCHS - update_state, loss_info = jax.lax.scan( - _update_epoch, update_state, None, config.system.ppo_epochs - ) - - params, opt_states, traj_batch, advantages, targets, key = update_state - learner_state = RNNLearnerState(params, opt_states, key, None, None, None, None) - metric = traj_batch.info - return learner_state, (metric, loss_info) - - def learner_fn(learner_state: RNNLearnerState, traj_batch : RNNPPOTransition, last_obs: chex.Array, last_dones : chex.Array, last_hstate : chex.Array) -> 
ExperimentOutput[RNNLearnerState]: - """Learner function. - - This function represents the learner, it updates the network parameters - by iteratively applying the `_update_step` function for a fixed number of - updates. The `_update_step` function is vectorized over a batch of inputs. - - Args: - learner_state (NamedTuple): - - params (Params): The initial model parameters. - - opt_states (OptStates): The initial optimizer state. - - key (chex.PRNGKey): The random number generator state. - - env_state (LogEnvState): The environment state. - - timesteps (TimeStep): The initial timestep in the initial trajectory. - """ - - - learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones, last_hstate) - - return ExperimentOutput( - learner_state=learner_state, - episode_metrics=episode_info, - train_metrics=loss_info, - ) - - return learner_fn - - -def learner_setup( - keys: chex.Array, config: DictConfig, learner_devices: List -) -> Tuple[LearnerFn[RNNLearnerState], Actor, RNNLearnerState]: - """Initialise learner_fn, network, optimiser, environment and states.""" - - #create temporory envoirnments. - env = environments.make_gym_env(config, 1) - # Get number of agents and actions. - action_space = env.single_action_space - config.system.num_agents = len(action_space) - config.system.num_actions = action_space[0].n - - # PRNG keys. - key, actor_net_key, critic_net_key = keys - - # Define network and optimisers. - actor_pre_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - actor_post_torso = hydra.utils.instantiate(config.network.actor_network.post_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=config.system.num_actions - ) - critic_pre_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) - critic_post_torso = hydra.utils.instantiate(config.network.critic_network.post_torso) - - actor_network = Actor( - pre_torso=actor_pre_torso, - post_torso=actor_post_torso, - action_head=actor_action_head, - hidden_state_dim=config.network.hidden_state_dim, - ) - critic_network = Critic( - pre_torso=critic_pre_torso, - post_torso=critic_post_torso, - hidden_state_dim=config.network.hidden_state_dim, - ) - - actor_lr = make_learning_rate(config.system.actor_lr, config) - critic_lr = make_learning_rate(config.system.critic_lr, config) - - actor_optim = optax.chain( - optax.clip_by_global_norm(config.system.max_grad_norm), - optax.adam(actor_lr, eps=1e-5), - ) - critic_optim = optax.chain( - optax.clip_by_global_norm(config.system.max_grad_norm), - optax.adam(critic_lr, eps=1e-5), - ) - - # Initialise observation: Select only obs for a single agent. - init_obs = jnp.array([[env.single_observation_space.sample()]]) - init_action_mask = jnp.ones((config.system.num_agents, config.system.num_actions)) - init_dones = jnp.zeros((1, 1, config.system.num_agents), dtype=jax.numpy.bool_) - init_x = (Observation(init_obs, init_action_mask), init_dones) - - # Initialise hidden states. - init_policy_hstate = ScannedRNN.initialize_carry( - (config.arch.num_envs, config.system.num_agents), config.network.hidden_state_dim - ) - init_critic_hstate = ScannedRNN.initialize_carry( - (config.arch.num_envs, config.system.num_agents), config.network.hidden_state_dim - ) - - # initialise params and optimiser state. 
- actor_params = actor_network.init(actor_net_key, init_policy_hstate, init_x) - actor_opt_state = actor_optim.init(actor_params) - critic_params = critic_network.init(critic_net_key, init_critic_hstate, init_x) - critic_opt_state = critic_optim.init(critic_params) - - # Get network apply functions and optimiser updates. - apply_fns = (actor_network.apply, critic_network.apply) - update_fns = (actor_optim.update, critic_optim.update) - - # Get batched iterated update and replicate it to pmap it over learner cores. - learn = get_learner_fn(apply_fns, update_fns, config) - learn = jax.pmap(learn, axis_name="device", devices = learner_devices) - - # Pack params and initial states. - params = Params(actor_params, critic_params) - hstates = HiddenStates(init_policy_hstate, init_critic_hstate) - - # Load model from checkpoint if specified. - if config.logger.checkpointing.load_model: - loaded_checkpoint = Checkpointer( - model_name=config.logger.system_name, - **config.logger.checkpointing.load_args, # Other checkpoint args - ) - # Restore the learner state from the checkpoint - restored_params, restored_hstates = loaded_checkpoint.restore_params( - input_params=params, restore_hstates=True, THiddenState=HiddenStates - ) - # Update the params and hstates - params = restored_params - hstates = restored_hstates if restored_hstates else hstates - - # Define params to be replicated across devices and batches. - key, step_keys = jax.random.split(key) - opt_states = OptStates(actor_opt_state, critic_opt_state) - replicate_learner = (params, opt_states, hstates, step_keys) - - # Duplicate learner across Learner devices. - replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices) - - # Initialise learner state. - params, opt_states, hstates, step_keys = replicate_learner - init_learner_state = RNNLearnerState(params, opt_states, step_keys, None, None, init_dones, hstates) - env.close() - - return learn, apply_fns, init_learner_state - - -def run_experiment(_config: DictConfig) -> float: - """Runs experiment.""" - config = copy.deepcopy(_config) - - devices = jax.devices() - learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] - - # PRNG keys. - key, key_e, actor_net_key, critic_net_key = jax.random.split( - jax.random.PRNGKey(config.system.seed), num=4 - ) - - # Sanity check of config - if config.system.recurrent_chunk_size is None: - config.system.recurrent_chunk_size = config.system.rollout_length - else: - assert ( - config.system.rollout_length % config.system.recurrent_chunk_size == 0 - ), "Rollout length must be divisible by recurrent chunk size." - assert ( - config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments must to be divisible by the number of learners " - - assert ( - int(config.arch.num_envs / len(config.arch.learner_device_ids)) - * config.arch.n_threads_per_executor - % config.system.num_minibatches - == 0 - ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" - - - # Setup learner. - learn, apply_fns , learner_state = learner_setup( - (key ,actor_net_key, critic_net_key), config, learner_devices - ) - - # Setup evaluator. - # One key per device for evaluation. - evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config,use_recurrent_net = True, scanned_rnn = ScannedRNN) #todo: make this more generic - - # Calculate total timesteps. 
- config = sebulba_check_total_timesteps(config) - assert ( - config.system.num_updates > config.arch.num_evaluation - ), "Number of updates per evaluation must be less than total number of updates." - - # Calculate number of updates per evaluation. - config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation) - config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation step if the num_updates is not a multiple of num_evaluation - steps_per_rollout = ( - len(config.arch.executor_device_ids) - * config.arch.n_threads_per_executor - * config.system.rollout_length - * config.arch.num_envs - * config.system.num_updates_per_eval - ) - - # Logger setup - logger = MavaLogger(config) - cfg: Dict = OmegaConf.to_container(config, resolve=True) - cfg["arch"]["devices"] = jax.devices() - pprint(cfg) - - # Set up checkpointer - save_checkpoint = config.logger.checkpointing.save_model - if save_checkpoint: - checkpointer = Checkpointer( - metadata=config, # Save all config as metadata in the checkpoint - model_name=config.logger.system_name, - **config.logger.checkpointing.save_args, # Checkpoint args - ) - - # Executor setup and launch. - unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) - unreplicated_hstates = flax.jax_utils.unreplicate(learner_state.hstates) - params_queues: List = [] - rollout_queues: List = [] - for d_idx, d_id in enumerate( # Loop through each executor device - config.arch.executor_device_ids - ): - # Replicate params per executor device - device_params = jax.device_put(unreplicated_params, devices[d_id]) - device_hstates = jax.device_put(unreplicated_hstates, devices[d_id]) - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - params_queues.append(queue.Queue(maxsize=1)) - rollout_queues.append(queue.Queue(maxsize=1)) - params_queues[-1].put(device_params) - threading.Thread( - target=rollout, - args=( - jax.device_put(key, devices[d_id]), - config, - rollout_queues[-1], - params_queues[-1], - apply_fns, - learner_devices, - d_id, - device_hstates, - ), - ).start() #todo : Use a process instead of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) - - # Run experiment for the total number of updates. 
- max_episode_return = jnp.float32(0.0) - best_params = None - for eval_step in range(config.arch.num_evaluation): - training_start_time = time.time() - learner_speeds = [] - rollout_times = [] - - episode_metrics = [] - train_metrics = [] - - # Make sure that the - num_updates_in_eval = config.system.num_updates_per_eval if eval_step != config.arch.num_evaluation - 1 else remaining_updates - for update in range(num_updates_in_eval): - sharded_storages = [] - sharded_next_obss = [] - sharded_next_dones = [] - sharded_next_hstates = [] - - rollout_start_time = time.time() - # Loop through each executor device - for d_idx, _ in enumerate(config.arch.executor_device_ids): - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - # Get data from rollout queue - ( - sharded_storage, - sharded_next_obs, - sharded_next_done, - sharded_next_hstate, - ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() - sharded_storages.append(sharded_storage) - sharded_next_obss.append(sharded_next_obs) - sharded_next_dones.append(sharded_next_done) - sharded_next_hstates.append(sharded_next_hstate) - - rollout_times.append(time.time() - rollout_start_time) - - - # Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) - sharded_next_obss = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_obss) - sharded_next_hstates = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_hstates) - - sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) - - learner_start_time = time.time() - learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones, sharded_next_hstates) - learner_speeds.append(time.time() - learner_start_time) - - # Stack the metrics - episode_metrics.append(learner_output.episode_metrics) - train_metrics.append(learner_output.train_metrics) - - # Send updated params to executors - unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) - for d_idx, d_id in enumerate(config.arch.executor_device_ids): - device_params = jax.device_put(unreplicated_params, devices[d_id]) - for thread_id in range(config.arch.n_threads_per_executor): - params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( - device_params - ) - - - - # Log the results of the training. - elapsed_time = time.time() - training_start_time - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics = jax.tree_map(lambda *x : np.asarray(x), *episode_metrics) - episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time - - # Separately log timesteps, actoring metrics and training metrics. - speed_info = {"total_time" : elapsed_time, "rollout_time" : np.sum(rollout_times), "learner_time" : np.sum(learner_speeds), "timestep" : t} - logger.log(speed_info , t, eval_step, LogEvent.MISC) - if ep_completed: # only log episode metrics if an episode was completed in the rollout. 
- logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - train_metrics = jax.tree_map(lambda *x : np.asarray(x), *train_metrics) - logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - - # Evaluation on the learner - evaluation_start_timer = time.time() - key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) - - # Log the results of the evaluation. - elapsed_time = time.time() - evaluation_start_timer - episode_return = jnp.mean(episode_metrics["episode_return"]) - - steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) - episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) - - if save_checkpoint: - # Save checkpoint of learner state - checkpointer.save( - timestep=steps_per_rollout * (eval_step + 1), - unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), - episode_return=episode_return, - ) - - if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(learner_output.learner_state.params) - max_episode_return = episode_return - - # Update runner state to continue training. - learner_state = learner_output.learner_state - - # Record the performance for the final evaluation run. - eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) - - # Measure absolute metric. - if config.arch.absolute_metric: - start_time = time.time() - - key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params.actor_params, 1), eval_key) - - elapsed_time = time.time() - start_time - steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) - - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) - - # Stop the logger. - logger.stop() - - return eval_performance - - - -@hydra.main(config_path="../../../configs", config_name="default_rec_ippo_seb.yaml", version_base="1.2") -def hydra_entry_point(cfg: DictConfig) -> float: - """Experiment entry point.""" - # Allow dynamic attributes. - OmegaConf.set_struct(cfg, False) - - # Run experiment. 
- eval_performance = run_experiment(cfg) - print(f"{Fore.CYAN}{Style.BRIGHT}IPPO experiment completed{Style.RESET_ALL}") - return eval_performance - - -if __name__ == "__main__": - hydra_entry_point() - -#learner_output.episode_metrics.keys() -#dict_keys(['episode_length', 'episode_return']) \ No newline at end of file diff --git a/mava/systems/sebulba/ppo/test.py b/mava/systems/sebulba/ppo/test.py deleted file mode 100644 index d1f34fccf..000000000 --- a/mava/systems/sebulba/ppo/test.py +++ /dev/null @@ -1,86 +0,0 @@ - -import copy -import time -from typing import Any, Dict, Tuple, List -import threading -import chex -import flax -import gym.vector -import gym.vector.async_vector_env -import hydra -import jax -import jax.numpy as jnp -import numpy as np -import optax -import queue -from collections import deque -from colorama import Fore, Style -from flax.core.frozen_dict import FrozenDict -from omegaconf import DictConfig, OmegaConf -from optax._src.base import OptState -from rich.pretty import pprint - -#from mava.evaluator import make_eval_fns -from mava.networks import FeedForwardActor as Actor -from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition #todo: change this -from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation -from mava.utils import make_env as environments -from mava.utils.checkpointing import Checkpointer -from mava.utils.jax_utils import ( - merge_leading_dims, - unreplicate_batch_dim, - unreplicate_n_dims, -) -from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps -from mava.utils.training import make_learning_rate -from mava.wrappers.episode_metrics import get_final_step_metrics -from flax import linen as nn -import gym -import rware -import lbforaging -from mava.wrappers import GymRwareWrapper, GymRecordEpisodeMetrics, _multiagent_worker_shared_memory, GymAgentIDWrapper, GymLBFWrapper -@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") -def hydra_entry_point(cfg: DictConfig) -> float: - """Experiment entry point.""" - # Allow dynamic attributes. - - - OmegaConf.set_struct(cfg, False) - def f(): - base = gym.make(cfg.env.scenario) - base = GymLBFWrapper(base, cfg.env.use_individual_rewards, True) - base = GymAgentIDWrapper(base) - return GymRecordEpisodeMetrics(base) - - base = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names - [ - lambda: f() - for _ in range(3) - ], - worker=_multiagent_worker_shared_memory - ) - base.reset() - n = 0 - done = False - r = [0] * 3 - while not done: - n+= 1 - agents_view, reward, terminated, truncated, info = base.step([r, r]) - print(terminated, truncated) - done = np.logical_or(terminated, truncated).all() - print(n, done) - #metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) - base.close() - print(done) - - - #print(b) - #r = 1+1 - # Create a sample input - #env = gym.make(cfg.env.scenario) - #env.reset() - #a = env.step(jnp.ones((4))) - -hydra_entry_point() diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index b329241d9..dd77105a9 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -12,23 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import sys import warnings -from typing import Dict, Tuple, Optional +from typing import Any, Callable, Dict, Optional, Tuple import gym import numpy as np -from numpy.typing import NDArray - from gym import spaces from gym.vector.utils import write_to_shared_memory -import sys +from numpy.typing import NDArray # Filter out the warnings warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") -class GymGenericWrapper(gym.Wrapper): - """Wrapper for rware gym environments""" +class GymRwareWrapper(gym.Wrapper): + """Wrapper for rware gym environments.""" def __init__( self, @@ -37,7 +36,6 @@ def __init__( add_global_state: bool = False, ): """Initialize the gym wrapper - Args: env (gym.env): gym env instance. use_individual_rewards (bool, optional): Use individual or group rewards. @@ -45,30 +43,26 @@ def __init__( add_global_state (bool, optional) : Create global observations. Defaults to False. """ super().__init__(env) - self._env = env + self._env = env self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state + self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) - self.num_actions = self._env.action_space[ - 0 - ].n - - def reset( - self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple: - + self.num_actions = self._env.action_space[0].n + + def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: + if seed is not None: self.env.seed(seed) - - agents_view, info = self._env.reset() + + agents_view, info = self._env.reset() info = {"actions_mask": self.get_actions_mask(info)} if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) - + return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: + def step(self, actions: NDArray) -> Tuple: agents_view, reward, terminated, truncated, info = self._env.step(actions) @@ -80,7 +74,7 @@ def step(self, actions: NDArray) -> Tuple: reward = np.array(reward) else: reward = np.array([np.array(reward).mean()] * self.num_agents) - + return agents_view, reward, terminated, truncated, info def get_actions_mask(self, info: Dict) -> NDArray: @@ -88,13 +82,9 @@ def get_actions_mask(self, info: Dict) -> NDArray: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - def get_global_obs(self, obs: NDArray): + def get_global_obs(self, obs: NDArray) -> NDArray: global_obs = np.concatenate(obs, axis=0) return np.tile(global_obs, (self.num_agents, 1)) - - - - class GymRecordEpisodeMetrics(gym.Wrapper): @@ -117,14 +107,14 @@ def reset(self) -> Tuple: "episode_length": self.running_count_episode_length, "is_terminal_step": True, } - + # Reset the metrics self.running_count_episode_return = 0.0 self.running_count_episode_length = 0 - + if "won_episode" in info: metrics["won_episode"] = info["won_episode"] - + info["metrics"] = metrics return agents_view, info @@ -140,17 +130,18 @@ def step(self, actions: NDArray) -> Tuple: metrics = { "episode_return": self.running_count_episode_return, "episode_length": self.running_count_episode_length, - "is_terminal_step": False, # We handle the True case in the reset function since this gets overwritten + "is_terminal_step": False, } if "won_episode" in info: metrics["won_episode"] = info["won_episode"] - + info["metrics"] = metrics - + return agents_view, reward, terminated, truncated, info - + + class GymAgentIDWrapper(gym.Wrapper): - """Add onehot agent IDs to observation.""" + """Add one 
hot agent IDs to observation.""" def __init__(self, env: gym.Env): super().__init__(env) @@ -164,7 +155,9 @@ def __init__(self, env: gym.Env): observation_space.shape, ) _new_obs_shape = (_obs_shape[0] + self.env.num_agents,) - _observation_boxs = [spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype)] * self.env.num_agents + _observation_boxs = [ + spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype) + ] * self.env.num_agents self.observation_space = spaces.Tuple(_observation_boxs) def reset(self) -> Tuple[np.ndarray, Dict]: @@ -178,9 +171,18 @@ def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: obs, reward, terminated, truncated, info = self.env.step(action) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, reward, terminated, truncated, info - -def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): + +# Copied form https://github.com/openai/gym/blob/master/gym/vector/async_vector_env.py +# Modified to work with multiple agents +def _multiagent_worker_shared_memory( # noqa: CCR001 + index: int, + env_fn: Callable[[], Any], + pipe: Any, + parent_pipe: Any, + shared_memory: Any, + error_queue: Any, +) -> None: assert shared_memory is not None env = env_fn() observation_space = env.observation_space @@ -190,9 +192,7 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me command, data = pipe.recv() if command == "reset": observation, info = env.reset(**data) - write_to_shared_memory( - observation_space, index, observation, shared_memory - ) + write_to_shared_memory(observation_space, index, observation, shared_memory) pipe.send(((None, info), True)) elif command == "step": @@ -203,14 +203,13 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me truncated, info, ) = env.step(data) + # Handel the dones across all of envs and agents if np.logical_or(terminated, truncated).all(): old_observation, old_info = observation, info observation, info = env.reset() info["final_observation"] = old_observation info["final_info"] = old_info - write_to_shared_memory( - observation_space, index, observation, shared_memory - ) + write_to_shared_memory(observation_space, index, observation, shared_memory) pipe.send(((None, reward, terminated, truncated, info), True)) elif command == "seed": env.seed(data) @@ -235,9 +234,7 @@ def _multiagent_worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_me setattr(env, name, value) pipe.send((None, True)) elif command == "_check_spaces": - pipe.send( - ((data[0] == observation_space, data[1] == env.action_space), True) - ) + pipe.send(((data[0] == observation_space, data[1] == env.action_space), True)) else: raise RuntimeError( f"Received unknown command `{command}`. 
Must " From e5dd71bf35c22df29a58e0267597fbf58d254040 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 10 Jul 2024 15:18:57 +0100 Subject: [PATCH 034/139] chore: pre-commits --- mava/configs/arch/sebulba.yaml | 1 - mava/evaluator.py | 167 +++++++------- mava/systems/anakin/ppo/ff_ippo.py | 4 +- mava/systems/anakin/ppo/ff_mappo.py | 4 +- mava/systems/sebulba/ppo/ff_ippo.py | 327 ++++++++++++++++----------- mava/types.py | 7 +- mava/utils/make_env.py | 16 +- mava/utils/total_timestep_checker.py | 4 +- mava/wrappers/__init__.py | 7 +- mava/wrappers/episode_metrics.py | 2 +- mava/wrappers/gym.py | 2 +- 11 files changed, 310 insertions(+), 231 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index fd555f71e..b6a0a9699 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -15,4 +15,3 @@ absolute_metric: True # Whether the absolute metric should be computed. For more n_threads_per_executor: 1 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices - diff --git a/mava/evaluator.py b/mava/evaluator.py index ca0c8c9a7..984a42377 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Optional, Tuple, Union +from typing import Any, Callable, Dict, Optional, Tuple, Union import chex import flax.linen as nn import jax import jax.numpy as jnp +import numpy as np from flax.core.frozen_dict import FrozenDict from jumanji.env import Environment from omegaconf import DictConfig @@ -27,13 +28,13 @@ EvalFn, EvalState, ExperimentOutput, + Observation, RecActorApply, RNNEvalState, + RNNObservation, + SebulbaEvalFn, ) -from mava.types import Observation - -import numpy as np def get_anakin_ff_evaluator_fn( env: Environment, @@ -348,7 +349,7 @@ def get_sebulba_ff_evaluator_fn( apply_fn: ActorApply, config: DictConfig, log_win_rate: bool = False, -) -> EvalFn: +) -> SebulbaEvalFn: """Get the evaluator function for feedforward networks. Args: @@ -356,63 +357,69 @@ def get_sebulba_ff_evaluator_fn( apply_fn (callable): Network forward pass method. config (dict): Experiment configuration. """ + @jax.jit - def get_action( #todo explicetly put these on the learner? they should already be there + def get_action( # todo explicetly put these on the learner? they should already be there params: FrozenDict, observation: Observation, key: chex.PRNGKey, - ) -> Tuple: + ) -> chex.Array: """Get action.""" - + pi = apply_fn(params, observation) - + if config.arch.evaluation_greedy: action = pi.mode() else: action = pi.sample(seed=key) return action - def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: - - - + + def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: + obs, info = env.reset() - dones = np.zeros(env.num_envs) # todo: jnp or np? 
- eval_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) - + dones = np.full(env.num_envs, False) + eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) + while not dones.all(): - + key, policy_key = jax.random.split(key) - - obs = jax.device_put(jnp.stack(obs, axis = 1)) - action_mask = jax.device_put(np.stack(info["actions_mask"]) ) - + + obs = jax.device_put(jnp.stack(obs, axis=1)) + action_mask = jax.device_put(np.stack(info["actions_mask"])) + actions = get_action(params, Observation(obs, action_mask), policy_key) cpu_action = jax.device_get(actions) - obs, reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) - - next_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) - + obs, reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0, 1)) + + next_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) + next_dones = next_metrics["is_terminal_step"] - - update_metric = lambda old_metric, new_metric : np.where(np.logical_and(next_dones, dones == False), new_metric, old_metric) - eval_metrics = jax.tree_map(update_metric, eval_metrics, next_metrics) - - dones = np.logical_or(dones, next_dones) + + update_flags = np.logical_and(next_dones, np.invert(dones)) + + update_metrics = lambda new_metric, old_metric, update_flags=update_flags: np.where( + (update_flags), new_metric, old_metric + ) + + eval_metrics = jax.tree_map(update_metrics, next_metrics, eval_metrics) + + dones = np.logical_or(dones, next_dones) eval_metrics.pop("is_terminal_step") return eval_metrics - + return eval_episodes + def get_sebulba_rnn_evaluator_fn( env: Environment, apply_fn: RecActorApply, config: DictConfig, scanned_rnn: nn.Module, log_win_rate: bool = False, -) -> EvalFn: +) -> SebulbaEvalFn: """Get the evaluator function for feedforward networks. Args: @@ -420,76 +427,82 @@ def get_sebulba_rnn_evaluator_fn( apply_fn (callable): Network forward pass method. config (dict): Experiment configuration. """ + @jax.jit - def get_action( #todo explicetly put these on the learner? they should already be there + def get_action( # todo explicetly put these on the learner? 
they should already be there params: FrozenDict, - observation: Observation, - hstate : chex.Array, + observation: RNNObservation, + hstate: chex.Array, key: chex.PRNGKey, - ) -> Tuple: + ) -> Tuple[chex.Array, chex.Array]: """Get action.""" - + hstate, pi = apply_fn(params, hstate, observation) - + if config.arch.evaluation_greedy: action = pi.mode() else: action = pi.sample(seed=key) return action, hstate - def eval_episodes(params: FrozenDict, key : chex.PRNGKey) -> Dict: - - - + + def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: + obs, info = env.reset() - eval_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) - + eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) + hstate = scanned_rnn.initialize_carry( - (env.num_envs, config.system.num_agents), config.network.hidden_state_dim + (env.num_envs, config.system.num_agents), config.network.hidden_state_dim ) - - dones = jnp.zeros((env.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) - + + dones = jnp.full((env.num_envs, config.system.num_agents), False) + while not dones.all(): - + key, policy_key = jax.random.split(key) - - obs = jax.device_put(jnp.stack(obs, axis = 1)) - action_mask = jax.device_put(np.stack(info["actions_mask"]) ) - - obs, action_mask, dones = jax.tree_map(lambda x : x[jnp.newaxis, :], (obs, action_mask, dones)) - - - actions, hstate = get_action(params, (Observation(obs, action_mask), dones), hstate, policy_key) + + obs = jax.device_put(jnp.stack(obs, axis=1)) + action_mask = jax.device_put(np.stack(info["actions_mask"])) + + obs, action_mask, dones = jax.tree_map( + lambda x: x[jnp.newaxis, :], (obs, action_mask, dones) + ) + + actions, hstate = get_action( + params, (Observation(obs, action_mask), dones), hstate, policy_key + ) cpu_action = jax.device_get(actions) - obs, reward, terminated, truncated, info = env.step(cpu_action[0].swapaxes(0,1)) - - next_metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) - + obs, reward, terminated, truncated, info = env.step(cpu_action[0].swapaxes(0, 1)) + + next_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) + next_dones = np.logical_or(terminated, truncated) - - per_env_done = np.all(np.logical_and(next_dones, dones[0] == False),axis = 1) - - update_metric = lambda old_metric, new_metric : np.where(per_env_done, new_metric, old_metric) - eval_metrics = jax.tree_map(update_metric, eval_metrics, next_metrics) - - dones = np.logical_or(dones, next_dones) + + update_flags = np.all(np.logical_and(next_dones, np.invert(dones[0])), axis=1) + + update_metrics = lambda new_metric, old_metric, update_flags=update_flags: np.where( + (update_flags), new_metric, old_metric + ) + + eval_metrics = jax.tree_map(update_metrics, next_metrics, eval_metrics) + + dones = np.logical_or(dones, next_dones) eval_metrics.pop("is_terminal_step") return eval_metrics - + return eval_episodes def make_sebulba_eval_fns( - eval_env_fn: callable, + eval_env_fn: Callable, network_apply_fn: Union[ActorApply, RecActorApply], config: DictConfig, - add_global_state : bool = False, + add_global_state: bool = False, use_recurrent_net: bool = False, scanned_rnn: Optional[nn.Module] = None, -) -> Tuple[EvalFn, EvalFn]: +) -> Tuple[SebulbaEvalFn, SebulbaEvalFn]: """Initialize evaluator functions for reinforcement learning. Args: @@ -501,14 +514,16 @@ def make_sebulba_eval_fns( Required if `use_recurrent_net` is True. Defaults to None. 
Returns: - Tuple[EvalFn, EvalFn]: A tuple of two evaluation functions: + Tuple[SebulbaEvalFn, SebulbaEvalFn]: A tuple of two evaluation functions: one for use during training and one for absolute metrics. Raises: AssertionError: If `use_recurrent_net` is True but `scanned_rnn` is not provided. """ - eval_env, absolute_eval_env = eval_env_fn(config, config.arch.num_eval_episodes, add_global_state = add_global_state), eval_env_fn(config, config.arch.num_eval_episodes * 10, add_global_state = add_global_state) - + eval_env, absolute_eval_env = eval_env_fn( + config, config.arch.num_eval_episodes, add_global_state=add_global_state + ), eval_env_fn(config, config.arch.num_eval_episodes * 10, add_global_state=add_global_state) + # Check if win rate is required for evaluation. log_win_rate = config.env.log_win_rate # Vmap it over number of agents and create evaluator_fn. @@ -536,4 +551,4 @@ def make_sebulba_eval_fns( absolute_eval_env, network_apply_fn, config, log_win_rate # type: ignore ) - return evaluator, absolute_metric_evaluator \ No newline at end of file + return evaluator, absolute_metric_evaluator diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py index f0803de4d..408bdf36d 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -462,7 +462,9 @@ def run_experiment(_config: DictConfig) -> float: # Setup evaluator. # One key per device for evaluation. eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor_network.apply, config) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns( + eval_env, actor_network.apply, config + ) # Calculate total timesteps. config = anakin_check_total_timesteps(config) diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py index 90fad5767..93d3f2c0b 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/anakin/ppo/ff_mappo.py @@ -459,7 +459,9 @@ def run_experiment(_config: DictConfig) -> float: # Setup evaluator. # One key per device for evaluation. eval_keys = jax.random.split(key_e, n_devices) - evaluator, absolute_metric_evaluator = make_anakin_eval_fns(eval_env, actor_network.apply, config) + evaluator, absolute_metric_evaluator = make_anakin_eval_fns( + eval_env, actor_network.apply, config + ) # Calculate total timesteps. config = anakin_check_total_timesteps(config) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 153f9e4a9..cf598770f 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -13,9 +13,12 @@ # limitations under the License. 
import copy -import time -from typing import Any, Dict, Tuple, List +import queue import threading +import time +from collections import deque +from typing import Any, Dict, List, Tuple + import chex import flax import hydra @@ -24,46 +27,47 @@ import jax.numpy as jnp import numpy as np import optax -import queue -from collections import deque from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict from omegaconf import DictConfig, OmegaConf from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_sebulba_eval_fns as make_eval_fns +from mava.evaluator import make_sebulba_eval_fns as make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition -from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, Observation +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.types import ( + ActorApply, + CriticApply, + ExperimentOutput, + Observation, + SebulbaLearnerFn, +) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer -from mava.utils.jax_utils import ( - merge_leading_dims, - unreplicate_batch_dim, - unreplicate_n_dims, -) +from mava.utils.jax_utils import merge_leading_dims, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger from mava.utils.total_timestep_checker import sebulba_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics -def rollout( +def rollout( key: chex.PRNGKey, config: DictConfig, rollout_queue: queue.Queue, params_queue: queue.Queue, apply_fns: Tuple, learner_devices: List, - actor_device_id : int): - - #setup + actor_device_id: int, +) -> None: + + # setup env = environments.make_gym_env(config, config.arch.num_envs) current_actor_device = jax.devices()[actor_device_id] actor_apply_fn, critic_apply_fn = apply_fns - + # Define the util functions: select action function and prepare data to share it with learner. 
@jax.jit def get_action_and_value( @@ -73,8 +77,8 @@ def get_action_and_value( ) -> Tuple: """Get action and value.""" key, subkey = jax.random.split(key) - - actor_policy = actor_apply_fn(params.actor_params, observation) # TODO: check vmapiing + + actor_policy = actor_apply_fn(params.actor_params, observation) # TODO: check vmapiing action = actor_policy.sample(seed=subkey) log_prob = actor_policy.log_prob(action) @@ -85,35 +89,43 @@ def get_action_and_value( params_queue_get_time: deque = deque(maxlen=1) rollout_time: deque = deque(maxlen=1) rollout_queue_put_time: deque = deque(maxlen=1) - - next_obs , info = env.reset() + + next_obs, info = env.reset() next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) - - move_to_device = lambda x : jax.device_put(x, device = current_actor_device) + + move_to_device = lambda x: jax.device_put(x, device=current_actor_device) + + shard_split_payload = lambda x, axis: jax.device_put_sharded( + jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices + ) # Loop till the learner has finished training - for update in range(config.system.num_updates): + for _update in range(config.system.num_updates): inference_time: float = 0 storage_time: float = 0 env_send_time: float = 0 - + # Get the latest parameters from the learner params_queue_get_time_start = time.time() params = params_queue.get() params_queue_get_time.append(time.time() - params_queue_get_time_start) - - # Rollout + + # Rollout rollout_time_start = time.time() storage: List = [] # Loop over the rollout length for _ in range(0, config.system.rollout_length): - + # Cached for transition - cached_next_obs = move_to_device(jnp.stack(next_obs, axis = 1)) # (num_envs, num_agents, ...) - cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) - cashed_action_mask = move_to_device(np.stack(info["actions_mask"])) # (num_envs, num_agents, num_actions) - + cached_next_obs = move_to_device( + jnp.stack(next_obs, axis=1) + ) # (num_envs, num_agents, ...) 
+ cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) + cashed_action_mask = move_to_device( + np.stack(info["actions_mask"]) + ) # (num_envs, num_agents, num_actions) + full_observation = Observation(cached_next_obs, cashed_action_mask) # Get action and value inference_time_start = time.time() @@ -123,20 +135,21 @@ def get_action_and_value( value, key, ) = get_action_and_value(params, full_observation, key) - - + # Step the environment inference_time += time.time() - inference_time_start env_send_time_start = time.time() cpu_action = jax.device_get(action) - next_obs, next_reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0,1)) # (num_env, num_agents) --> (num_agents, num_env) + next_obs, next_reward, terminated, truncated, info = env.step( + cpu_action.swapaxes(0, 1) + ) # (num_env, num_agents) --> (num_agents, num_env) env_send_time += time.time() - env_send_time_start - + # Prepare the data storage_time_start = time.time() - next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_map(lambda *x : jnp.asarray(x), *info["metrics"]) # Stack the metrics - + next_dones = np.logical_or(terminated, truncated) + metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) # Stack the metrics + # Append data to storage storage.append( PPOTransition( @@ -146,68 +159,75 @@ def get_action_and_value( reward=next_reward, log_prob=log_prob, obs=full_observation, - info=metrics, - ) + info=metrics, + ) ) storage_time += time.time() - storage_time_start - rollout_time.append(time.time() - rollout_time_start) - + rollout_time.append(time.time() - rollout_time_start) + parse_timer = time.time() - - # Prepare data to share with learner - #[PPOTransition() * rollout_len] --> PPOTransition[done = (rollout_len, num_envs, num_agents), action = (rollout_len, num_envs, num_agents, num_actions), ...] - stacked_storage = jax.tree_map( lambda *xs : jnp.stack(xs), *storage) - + + # Prepare data to share with learner + # [PPOTransition() * rollout_len] --> PPOTransition[done=(rollout_len, num_envs, num_agents) + # , action=(rollout_len, num_envs, num_agents, num_actions), ...] + stacked_storage = jax.tree_map(lambda *xs: jnp.stack(xs), *storage) # Split the arrays over the different learner_devices on the num_envs axis - shard_split_payload= lambda x, axis : jax.device_put_sharded(jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices) - sharded_storage = jax.tree_map(lambda x : shard_split_payload(x, 1) , stacked_storage) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) - + sharded_storage = jax.tree_map( + lambda x: shard_split_payload(x, 1), stacked_storage + ) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) + # (num_learner_devices, num_envs, num_agents, ...) 
- sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis = 1), 0) - sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) + sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis=1), 0) + sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) sharded_next_done = shard_split_payload(next_dones, 0) - + # Pack the obs and action mask payload_obs = Observation(sharded_next_obs, sharded_next_action_mask) # For debugging - speed_info = { + speed_info = { # noqa F841 "rollout_time": np.mean(rollout_time), "params_queue_get_time": np.mean(params_queue_get_time), "action_inference": inference_time, "storage_time": storage_time, "env_step_time": env_send_time, - "rollout_queue_put_time": np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0, - "parse_time" : time.time() - parse_timer, - } - #print(speed_info) - + "rollout_queue_put_time": ( + np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0 + ), + "parse_time": time.time() - parse_timer, + } + payload = ( sharded_storage, payload_obs, sharded_next_done, ) - + # Put data in the rollout queue to share it with the learner rollout_queue_put_time_start = time.time() rollout_queue.put(payload) rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) - + def get_learner_fn( apply_fns: Tuple[ActorApply, CriticApply], update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], config: DictConfig, -) -> LearnerFn[LearnerState]: +) -> SebulbaLearnerFn[LearnerState, PPOTransition]: """Get the learner function.""" # Get apply and update functions for actor and critic networks. actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns - def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: Observation, last_dones : chex.Array) -> Tuple[LearnerState, Tuple]: + def _update_step( + learner_state: LearnerState, + traj_batch: PPOTransition, + last_obs: Observation, + last_dones: chex.Array, + ) -> Tuple[LearnerState, Tuple]: """A single update of the network. This function steps the environment and records the trajectory batch for @@ -225,7 +245,7 @@ def _update_step(learner_state: LearnerState, traj_batch : PPOTransition, last_o _ (Any): The current metrics info. """ - def _calculate_gae( #todo: lake sure this is appropriate + def _calculate_gae( # todo: lake sure this is appropriate traj_batch: PPOTransition, last_val: chex.Array, last_done: chex.Array ) -> Tuple[chex.Array, chex.Array]: def _get_advantages( @@ -246,7 +266,7 @@ def _get_advantages( unroll=16, ) return advantages, advantages + traj_batch.value - + # CALCULATE ADVANTAGE params, opt_states, key, _, _ = learner_state last_val = critic_apply_fn(params.critic_params, last_obs) @@ -337,7 +357,8 @@ def _critic_loss_fn( # available at https://tinyurl.com/26tdzs5x # pmean over devices. actor_grads, actor_loss_info = jax.lax.pmean( - (actor_grads, actor_loss_info), axis_name="device" #todo: pmean over learner devices not all + (actor_grads, actor_loss_info), + axis_name="device", # todo: pmean over learner devices not all ) # pmean over devices. 
@@ -376,7 +397,12 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) # SHUFFLE MINIBATCHES - batch_size = config.system.rollout_length * (config.arch.num_envs // len(config.arch.learner_device_ids)) * len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor + batch_size = ( + config.system.rollout_length + * (config.arch.num_envs // len(config.arch.learner_device_ids)) + * len(config.arch.executor_device_ids) + * config.arch.n_threads_per_executor + ) permutation = jax.random.permutation(shuffle_key, batch_size) batch = (traj_batch, advantages, targets) batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) @@ -406,7 +432,12 @@ def _critic_loss_fn( metric = traj_batch.info return learner_state, (metric, loss_info) - def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs: chex.Array, last_dones : chex.Array) -> ExperimentOutput[LearnerState]: + def learner_fn( + learner_state: LearnerState, + traj_batch: PPOTransition, + last_obs: chex.Array, + last_dones: chex.Array, + ) -> ExperimentOutput[LearnerState]: """Learner function. This function represents the learner, it updates the network parameters @@ -423,7 +454,9 @@ def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs """ # todo: add update_batch_size - learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch , last_obs, last_dones) + learner_state, (episode_info, loss_info) = _update_step( + learner_state, traj_batch, last_obs, last_dones + ) return ExperimentOutput( learner_state=learner_state, @@ -436,15 +469,17 @@ def learner_fn(learner_state: LearnerState, traj_batch : PPOTransition, last_obs def learner_setup( keys: chex.Array, config: DictConfig, learner_devices: List -) -> Tuple[LearnerFn[LearnerState], Actor, LearnerState]: +) -> Tuple[ + SebulbaLearnerFn[LearnerState, PPOTransition], Tuple[ActorApply, CriticApply], LearnerState +]: """Initialise learner_fn, network, optimiser, environment and states.""" - - #create temporory envoirnments. - env = environments.make_gym_env(config, config.arch.num_envs) + + # create temporory envoirnments. + env = environments.make_gym_env(config, config.arch.num_envs) # Get number of agents and actions. action_space = env.single_action_space config.system.num_agents = len(action_space) - config.system.num_actions = action_space[0].n + config.system.num_actions = action_space[0].n # PRNG keys. key, actor_net_key, critic_net_key = keys @@ -493,7 +528,7 @@ def learner_setup( # Get batched iterated update and replicate it to pmap it over learner cores. learn = get_learner_fn(apply_fns, update_fns, config) - learn = jax.pmap(learn, axis_name="device", devices = learner_devices) + learn = jax.pmap(learn, axis_name="device", devices=learner_devices) # Load model from checkpoint if specified. if config.logger.checkpointing.load_model: @@ -522,49 +557,54 @@ def learner_setup( return learn, apply_fns, init_learner_state -def run_experiment(_config: DictConfig) -> float: +def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 """Runs experiment.""" config = copy.deepcopy(_config) - devices = jax.devices() + devices = jax.devices() learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] # PRNG keys. 
key, key_e, actor_net_key, critic_net_key = jax.random.split( jax.random.PRNGKey(config.system.seed), num=4 ) - + # Sanity check of config assert ( config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments must to be divisible by the number of learners " - + ), "The number of environments must to be divisible by the number of learners " + assert ( int(config.arch.num_envs / len(config.arch.learner_device_ids)) * config.arch.n_threads_per_executor % config.system.num_minibatches == 0 - ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" + ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" - # Setup learner. - learn, apply_fns , learner_state = learner_setup( - (key ,actor_net_key, critic_net_key), config, learner_devices + learn, apply_fns, learner_state = learner_setup( + (key, actor_net_key, critic_net_key), config, learner_devices ) # Setup evaluator. # One key per device for evaluation. - evaluator, absolute_metric_evaluator = make_eval_fns(environments.make_gym_env, apply_fns[0], config) #todo: make this more generic + evaluator, absolute_metric_evaluator = make_eval_fns( + environments.make_gym_env, apply_fns[0], config + ) # todo: make this more generic # Calculate total timesteps. - config = sebulba_check_total_timesteps(config) + config = sebulba_check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." # Calculate number of updates per evaluation. - config.system.num_updates_per_eval, remaining_updates = divmod(config.system.num_updates , config.arch.num_evaluation) - config.arch.num_evaluation += (remaining_updates != 0) # Add an evaluation step if the num_updates is not a multiple of num_evaluation + config.system.num_updates_per_eval, remaining_updates = divmod( + config.system.num_updates, config.arch.num_evaluation + ) + config.arch.num_evaluation += ( + remaining_updates != 0 + ) # Add an evaluation step if the num_updates is not a multiple of num_evaluation steps_per_rollout = ( len(config.arch.executor_device_ids) * config.arch.n_threads_per_executor @@ -587,18 +627,18 @@ def run_experiment(_config: DictConfig) -> float: model_name=config.logger.system_name, **config.logger.checkpointing.save_args, # Checkpoint args ) - + # Executor setup and launch. unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) params_queues: List = [] rollout_queues: List = [] - for d_idx, d_id in enumerate( # Loop through each executor device + for _d_idx, d_id in enumerate( # Loop through each executor device config.arch.executor_device_ids ): # Replicate params per executor device device_params = jax.device_put(unreplicated_params, devices[d_id]) # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): + for _thread_id in range(config.arch.n_threads_per_executor): params_queues.append(queue.Queue(maxsize=1)) rollout_queues.append(queue.Queue(maxsize=1)) params_queues[-1].put(device_params) @@ -613,27 +653,30 @@ def run_experiment(_config: DictConfig) -> float: learner_devices, d_id, ), - ).start() #todo : Use a process instead of a thread? threads are limited by pything's GIL and they only run on a single core , processes have a bogger overhead (max num_env for optimal performance?) - - + ).start() + # Run experiment for the total number of updates. 
max_episode_return = jnp.float32(0.0) best_params = None - for eval_step in range(config.arch.num_evaluation): + for eval_step in range(config.arch.num_evaluation): training_start_time = time.time() learner_speeds = [] rollout_times = [] - + episode_metrics = [] train_metrics = [] - - # Make sure that the - num_updates_in_eval = config.system.num_updates_per_eval if eval_step != config.arch.num_evaluation - 1 else remaining_updates - for update in range(num_updates_in_eval): + + # Make sure that the + num_updates_in_eval = ( + config.system.num_updates_per_eval + if eval_step != config.arch.num_evaluation - 1 + else remaining_updates + ) + for _update in range(num_updates_in_eval): sharded_storages = [] sharded_next_obss = [] sharded_next_dones = [] - + rollout_start_time = time.time() # Loop through each executor device for d_idx, _ in enumerate(config.arch.executor_device_ids): @@ -648,24 +691,28 @@ def run_experiment(_config: DictConfig) -> float: sharded_storages.append(sharded_storage) sharded_next_obss.append(sharded_next_obs) sharded_next_dones.append(sharded_next_done) - + rollout_times.append(time.time() - rollout_start_time) - - - # Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 2), *sharded_storages) - sharded_next_obss = jax.tree_map(lambda *x : jnp.concatenate(x, axis = 1), *sharded_next_obss) - sharded_next_dones = jnp.concatenate(sharded_next_dones, axis = 1) + # Concatinate the returned trajectories on the n_env axis + sharded_storages = jax.tree_map( + lambda *x: jnp.concatenate(x, axis=2), *sharded_storages + ) + sharded_next_obss = jax.tree_map( + lambda *x: jnp.concatenate(x, axis=1), *sharded_next_obss + ) + sharded_next_dones = jnp.concatenate(sharded_next_dones, axis=1) learner_start_time = time.time() - learner_output = learn(learner_state, sharded_storages, sharded_next_obss, sharded_next_dones) + learner_output = learn( + learner_state, sharded_storages, sharded_next_obss, sharded_next_dones + ) learner_speeds.append(time.time() - learner_start_time) - + # Stack the metrics episode_metrics.append(learner_output.episode_metrics) train_metrics.append(learner_output.train_metrics) - + # Send updated params to executors unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) for d_idx, d_id in enumerate(config.arch.executor_device_ids): @@ -675,28 +722,33 @@ def run_experiment(_config: DictConfig) -> float: device_params ) - - # Log the results of the training. elapsed_time = time.time() - training_start_time t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics = jax.tree_map(lambda *x : np.asarray(x), *episode_metrics) - episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time - + episode_metrics = jax.tree_map(lambda *x: np.asarray(x), *episode_metrics) + episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + # Separately log timesteps, actoring metrics and training metrics. 
- speed_info = {"total_time" : elapsed_time, "rollout_time" : np.sum(rollout_times), "learner_time" : np.sum(learner_speeds), "timestep" : t} - logger.log(speed_info , t, eval_step, LogEvent.MISC) + speed_info = { + "total_time": elapsed_time, + "rollout_time": np.sum(rollout_times), + "learner_time": np.sum(learner_speeds), + "timestep": t, + } + logger.log(speed_info, t, eval_step, LogEvent.MISC) if ep_completed: # only log episode metrics if an episode was completed in the rollout. - logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - train_metrics = jax.tree_map(lambda *x : np.asarray(x), *train_metrics) + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + train_metrics = jax.tree_map(lambda *x: np.asarray(x), *train_metrics) logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - # Evaluation on the learner + # Evaluation on the learner evaluation_start_timer = time.time() key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = evaluator(unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1 ), eval_key) - + episode_metrics = evaluator( + unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1), eval_key + ) + # Log the results of the evaluation. elapsed_time = time.time() - evaluation_start_timer episode_return = jnp.mean(episode_metrics["episode_return"]) @@ -704,7 +756,7 @@ def run_experiment(_config: DictConfig) -> float: steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) - + if save_checkpoint: # Save checkpoint of learner state checkpointer.save( @@ -712,15 +764,15 @@ def run_experiment(_config: DictConfig) -> float: unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), episode_return=episode_return, ) - + if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(learner_output.learner_state.params) + best_params = copy.deepcopy(learner_output.learner_state.params.actor_params) max_episode_return = episode_return - + # Update runner state to continue training. learner_state = learner_output.learner_state - - # Record the performance for the final evaluation run. + + # Record the performance for the final evaluation run. eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) # Measure absolute metric. @@ -728,11 +780,11 @@ def run_experiment(_config: DictConfig) -> float: start_time = time.time() key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params.actor_params, 1), eval_key) + episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params, 1), eval_key) elapsed_time = time.time() - start_time steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) - + t = int(steps_per_rollout * (eval_step + 1)) episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) @@ -743,8 +795,9 @@ def run_experiment(_config: DictConfig) -> float: return eval_performance - -@hydra.main(config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2") +@hydra.main( + config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2" +) def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. 
@@ -759,5 +812,5 @@ def hydra_entry_point(cfg: DictConfig) -> float: if __name__ == "__main__": hydra_entry_point() -#learner_output.episode_metrics.keys() -#dict_keys(['episode_length', 'episode_return']) \ No newline at end of file +# learner_output.episode_metrics.keys() +# dict_keys(['episode_length', 'episode_return']) diff --git a/mava/types.py b/mava/types.py index c6a2cf6aa..02d2bae90 100644 --- a/mava/types.py +++ b/mava/types.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Callable, Dict, Generic, Tuple, TypeVar, Optional +from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar import chex from flax.core.frozen_dict import FrozenDict @@ -81,6 +81,7 @@ class RNNEvalState(NamedTuple): # `MavaState` is the main type passed around in our systems. It is often used as a scan carry. # Types like: `EvalState` | `LearnerState` (mava/systems//types.py) are `MavaState`s. MavaState = TypeVar("MavaState") +MavaTransition = TypeVar("MavaTransition") class ExperimentOutput(NamedTuple, Generic[MavaState]): @@ -92,7 +93,11 @@ class ExperimentOutput(NamedTuple, Generic[MavaState]): LearnerFn = Callable[[MavaState], ExperimentOutput[MavaState]] +SebulbaLearnerFn = Callable[ + [MavaState, MavaTransition, chex.Array, chex.Array], ExperimentOutput[MavaState] +] EvalFn = Callable[[FrozenDict, chex.PRNGKey], ExperimentOutput[MavaState]] +SebulbaEvalFn = Callable[[FrozenDict, chex.PRNGKey], Dict] ActorApply = Callable[[FrozenDict, Observation], Distribution] CriticApply = Callable[[FrozenDict, Observation], Value] diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index df769d8c7..2330674f0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -22,7 +22,6 @@ import jumanji import matrax from gigastep import ScenarioBuilder -import lbforaging from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment from jumanji.environments.routing.cleaner.generator import ( @@ -45,16 +44,16 @@ CleanerWrapper, ConnectorWrapper, GigastepWrapper, + GymAgentIDWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, - GymAgentIDWrapper, - _multiagent_worker_shared_memory, LbfWrapper, MabraxWrapper, MatraxWrapper, RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, + _multiagent_worker_shared_memory, ) # Registry mapping environment names to their generator and wrapper classes. @@ -211,7 +210,9 @@ def make_gigastep_env( def make_gym_env( - config: DictConfig, num_env : int, add_global_state: bool = False, + config: DictConfig, + num_env: int, + add_global_state: bool = False, ) -> Environment: # todo : create the appropriate annotation for the sync vector """ Create a Gym environment. 
@@ -238,11 +239,8 @@ def create_gym_env( return wrapped_env envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names - [ - lambda: create_gym_env(config, add_global_state) - for _ in range(num_env) - ], - worker=_multiagent_worker_shared_memory + [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], + worker=_multiagent_worker_shared_memory, ) return envs diff --git a/mava/utils/total_timestep_checker.py b/mava/utils/total_timestep_checker.py index fd90b7436..744451d1b 100644 --- a/mava/utils/total_timestep_checker.py +++ b/mava/utils/total_timestep_checker.py @@ -68,7 +68,7 @@ def sebulba_check_total_timesteps(config: DictConfig) -> DictConfig: // config.system.rollout_length // config.arch.num_envs // config.arch.n_threads_per_executor - // len(config.arch.executor_device_ids) + // len(config.arch.executor_device_ids) ) print( f"{Fore.RED}{Style.BRIGHT} Changing the number of updates " @@ -76,4 +76,4 @@ def sebulba_check_total_timesteps(config: DictConfig) -> DictConfig: + " for a specific number of updates, please set total_timesteps to None!" + f"{Style.RESET_ALL}" ) - return config \ No newline at end of file + return config diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 4a4eb6ed0..ee8fdf186 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -15,7 +15,12 @@ from mava.wrappers.auto_reset_wrapper import AutoResetWrapper from mava.wrappers.episode_metrics import RecordEpisodeMetrics from mava.wrappers.gigastep import GigastepWrapper -from mava.wrappers.gym import GymRecordEpisodeMetrics, GymRwareWrapper, GymAgentIDWrapper, _multiagent_worker_shared_memory +from mava.wrappers.gym import ( + GymAgentIDWrapper, + GymRecordEpisodeMetrics, + GymRwareWrapper, + _multiagent_worker_shared_memory, +) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( CleanerWrapper, diff --git a/mava/wrappers/episode_metrics.py b/mava/wrappers/episode_metrics.py index a46dc1b91..a2b0fdb37 100644 --- a/mava/wrappers/episode_metrics.py +++ b/mava/wrappers/episode_metrics.py @@ -75,7 +75,7 @@ def step( # Previous episode return/length until done and then the next episode return. 
episode_return_info = state.episode_return * not_done + new_episode_return * done episode_length_info = state.episode_length * not_done + new_episode_length * done - + timestep.extras["episode_metrics"] = { "episode_return": episode_return_info, "episode_length": episode_length_info, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index dd77105a9..b5f89b45f 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -245,4 +245,4 @@ def _multiagent_worker_shared_memory( # noqa: CCR001 error_queue.put((index,) + sys.exc_info()[:2]) pipe.send((None, False)) finally: - env.close() \ No newline at end of file + env.close() From af24082ab3ccd4ac878edd9de9e3e3ed7fa4b9f1 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sat, 13 Jul 2024 23:38:03 +0100 Subject: [PATCH 035/139] fix: fix the num_updates_in_eval in the last eval --- mava/systems/sebulba/ppo/ff_ippo.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index cf598770f..d8893ded8 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -666,11 +666,11 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 episode_metrics = [] train_metrics = [] - # Make sure that the + # Full or partial last eval step. num_updates_in_eval = ( - config.system.num_updates_per_eval - if eval_step != config.arch.num_evaluation - 1 - else remaining_updates + remaining_updates + if eval_step == config.arch.num_evaluation - 1 and remaining_updates + else config.system.num_updates_per_eval ) for _update in range(num_updates_in_eval): sharded_storages = [] From 32ac3890603fc0040bf4bfacc6efb88ba2e2f7f0 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 10:58:05 +0100 Subject: [PATCH 036/139] fix: fixed the num evals cacls --- mava/systems/sebulba/ppo/ff_ippo.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index d8893ded8..71e4e31d3 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -597,11 +597,9 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." - # Calculate number of updates per evaluation. 
- config.system.num_updates_per_eval, remaining_updates = divmod( - config.system.num_updates, config.arch.num_evaluation - ) + config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + config.arch.num_evaluation, remaining_updates = divmod(config.system.num_updates , config.system.num_updates_per_eval) config.arch.num_evaluation += ( remaining_updates != 0 ) # Add an evaluation step if the num_updates is not a multiple of num_evaluation From 45ca5875db7b05e34013bf485636311c9fcec2d4 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 11:04:59 +0100 Subject: [PATCH 037/139] chore : pre commit --- mava/systems/sebulba/ppo/ff_ippo.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 71e4e31d3..a184414d9 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -599,7 +599,9 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 ), "Number of updates per evaluation must be less than total number of updates." # Calculate number of updates per evaluation. config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation - config.arch.num_evaluation, remaining_updates = divmod(config.system.num_updates , config.system.num_updates_per_eval) + config.arch.num_evaluation, remaining_updates = divmod( + config.system.num_updates, config.system.num_updates_per_eval + ) config.arch.num_evaluation += ( remaining_updates != 0 ) # Add an evaluation step if the num_updates is not a multiple of num_evaluation From d6944984146fd1975924453efb28307af09c6836 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 11:12:34 +0100 Subject: [PATCH 038/139] chore: created the anakin and sebulba folders --- mava/systems/{ => anakin}/ppo/__init__.py | 0 mava/systems/{ => anakin}/ppo/ff_ippo.py | 2 +- mava/systems/{ => anakin}/ppo/ff_mappo.py | 2 +- mava/systems/{ => anakin}/ppo/rec_ippo.py | 2 +- mava/systems/{ => anakin}/ppo/rec_mappo.py | 2 +- mava/systems/{ => anakin}/ppo/types.py | 0 mava/systems/{ => anakin}/q_learning/__init__.py | 0 mava/systems/{ => anakin}/q_learning/rec_iql.py | 0 mava/systems/{ => anakin}/q_learning/types.py | 0 mava/systems/{ => anakin}/sac/__init__.py | 0 mava/systems/{ => anakin}/sac/ff_isac.py | 0 mava/systems/{ => anakin}/sac/ff_masac.py | 0 mava/systems/{ => anakin}/sac/types.py | 0 mava/systems/sebulba/ppo/ff_ippo.py | 0 14 files changed, 4 insertions(+), 4 deletions(-) rename mava/systems/{ => anakin}/ppo/__init__.py (100%) rename mava/systems/{ => anakin}/ppo/ff_ippo.py (99%) rename mava/systems/{ => anakin}/ppo/ff_mappo.py (99%) rename mava/systems/{ => anakin}/ppo/rec_ippo.py (99%) rename mava/systems/{ => anakin}/ppo/rec_mappo.py (99%) rename mava/systems/{ => anakin}/ppo/types.py (100%) rename mava/systems/{ => anakin}/q_learning/__init__.py (100%) rename mava/systems/{ => anakin}/q_learning/rec_iql.py (100%) rename mava/systems/{ => anakin}/q_learning/types.py (100%) rename mava/systems/{ => anakin}/sac/__init__.py (100%) rename mava/systems/{ => anakin}/sac/ff_isac.py (100%) rename mava/systems/{ => anakin}/sac/ff_masac.py (100%) rename mava/systems/{ => anakin}/sac/types.py (100%) create mode 100644 mava/systems/sebulba/ppo/ff_ippo.py diff --git a/mava/systems/ppo/__init__.py b/mava/systems/anakin/ppo/__init__.py similarity index 100% rename from mava/systems/ppo/__init__.py rename to mava/systems/anakin/ppo/__init__.py diff --git 
a/mava/systems/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py similarity index 99% rename from mava/systems/ppo/ff_ippo.py rename to mava/systems/anakin/ppo/ff_ippo.py index 7b45fb45f..f37407dd2 100644 --- a/mava/systems/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -32,7 +32,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py similarity index 99% rename from mava/systems/ppo/ff_mappo.py rename to mava/systems/anakin/ppo/ff_mappo.py index 519fa4f39..127216069 100644 --- a/mava/systems/ppo/ff_mappo.py +++ b/mava/systems/anakin/ppo/ff_mappo.py @@ -31,7 +31,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/ppo/rec_ippo.py b/mava/systems/anakin/ppo/rec_ippo.py similarity index 99% rename from mava/systems/ppo/rec_ippo.py rename to mava/systems/anakin/ppo/rec_ippo.py index e70a59f07..e4b6740b1 100644 --- a/mava/systems/ppo/rec_ippo.py +++ b/mava/systems/anakin/ppo/rec_ippo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.ppo.types import ( +from mava.systems.anakin.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py similarity index 99% rename from mava/systems/ppo/rec_mappo.py rename to mava/systems/anakin/ppo/rec_mappo.py index 14284cedb..c351ba576 100644 --- a/mava/systems/ppo/rec_mappo.py +++ b/mava/systems/anakin/ppo/rec_mappo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.ppo.types import ( +from mava.systems.anakin.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/ppo/types.py b/mava/systems/anakin/ppo/types.py similarity index 100% rename from mava/systems/ppo/types.py rename to mava/systems/anakin/ppo/types.py diff --git a/mava/systems/q_learning/__init__.py b/mava/systems/anakin/q_learning/__init__.py similarity index 100% rename from mava/systems/q_learning/__init__.py rename to mava/systems/anakin/q_learning/__init__.py diff --git a/mava/systems/q_learning/rec_iql.py b/mava/systems/anakin/q_learning/rec_iql.py similarity index 100% rename from mava/systems/q_learning/rec_iql.py rename to mava/systems/anakin/q_learning/rec_iql.py diff --git a/mava/systems/q_learning/types.py b/mava/systems/anakin/q_learning/types.py similarity index 100% rename from mava/systems/q_learning/types.py rename to 
mava/systems/anakin/q_learning/types.py diff --git a/mava/systems/sac/__init__.py b/mava/systems/anakin/sac/__init__.py similarity index 100% rename from mava/systems/sac/__init__.py rename to mava/systems/anakin/sac/__init__.py diff --git a/mava/systems/sac/ff_isac.py b/mava/systems/anakin/sac/ff_isac.py similarity index 100% rename from mava/systems/sac/ff_isac.py rename to mava/systems/anakin/sac/ff_isac.py diff --git a/mava/systems/sac/ff_masac.py b/mava/systems/anakin/sac/ff_masac.py similarity index 100% rename from mava/systems/sac/ff_masac.py rename to mava/systems/anakin/sac/ff_masac.py diff --git a/mava/systems/sac/types.py b/mava/systems/anakin/sac/types.py similarity index 100% rename from mava/systems/sac/types.py rename to mava/systems/anakin/sac/types.py diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py new file mode 100644 index 000000000..e69de29bb From cb8111fe0c87c616913d165e2f19788533af152d Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 11:18:21 +0100 Subject: [PATCH 039/139] fix: imports and config paths in systems --- mava/systems/anakin/ppo/ff_ippo.py | 2 +- mava/systems/anakin/ppo/ff_mappo.py | 2 +- mava/systems/anakin/ppo/rec_ippo.py | 2 +- mava/systems/anakin/ppo/rec_mappo.py | 2 +- mava/systems/sebulba/ppo/ff_ippo.py | 13 +++++++++++++ mava/utils/checkpointing.py | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/anakin/ppo/ff_ippo.py index f37407dd2..51efd10e7 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/anakin/ppo/ff_ippo.py @@ -578,7 +578,7 @@ def run_experiment(_config: DictConfig) -> float: return eval_performance -@hydra.main(config_path="../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_ippo.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/anakin/ppo/ff_mappo.py index 127216069..a9364fdfc 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/anakin/ppo/ff_mappo.py @@ -575,7 +575,7 @@ def run_experiment(_config: DictConfig) -> float: return eval_performance -@hydra.main(config_path="../../configs", config_name="default_ff_mappo.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_mappo.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. diff --git a/mava/systems/anakin/ppo/rec_ippo.py b/mava/systems/anakin/ppo/rec_ippo.py index e4b6740b1..a4d3df428 100644 --- a/mava/systems/anakin/ppo/rec_ippo.py +++ b/mava/systems/anakin/ppo/rec_ippo.py @@ -735,7 +735,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 return eval_performance -@hydra.main(config_path="../../configs", config_name="default_rec_ippo.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_rec_ippo.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. 
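
The config-path edits in this patch follow from how Hydra resolves paths: a relative `config_path` is resolved against the directory of the file that defines the decorated entry point, so code that moves one directory deeper (mava/systems/ppo/ to mava/systems/anakin/ppo/) needs one extra `../` to keep reaching mava/configs. A minimal sketch of the idea, with an illustrative file layout that is not taken from the repo:

    # sketch.py, assumed to live at mava/systems/anakin/ppo/sketch.py;
    # "../../../configs" then resolves to <project-root>/mava/configs at run time.
    import hydra
    from omegaconf import DictConfig

    @hydra.main(config_path="../../../configs", config_name="default_ff_ippo.yaml", version_base="1.2")
    def entry_point(cfg: DictConfig) -> None:
        print(cfg)  # the fully composed config, proving the path resolved

    if __name__ == "__main__":
        entry_point()
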
diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py index c351ba576..c2f9dc678 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/anakin/ppo/rec_mappo.py @@ -726,7 +726,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 return eval_performance -@hydra.main(config_path="../../configs", config_name="default_rec_mappo.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_rec_mappo.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index e69de29bb..21db9ec1c 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -0,0 +1,13 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/mava/utils/checkpointing.py b/mava/utils/checkpointing.py index 8955f76ce..230c4938d 100644 --- a/mava/utils/checkpointing.py +++ b/mava/utils/checkpointing.py @@ -24,7 +24,7 @@ from jax.tree_util import tree_map from omegaconf import DictConfig, OmegaConf -from mava.systems.ppo.types import HiddenStates, Params +from mava.systems.anakin.ppo.types import HiddenStates, Params from mava.types import MavaState # Keep track of the version of the checkpointer From d842375c8e89bc25e73f3ea97b063cc63083c045 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 15:27:15 +0100 Subject: [PATCH 040/139] fix: allow for reproducibility --- mava/evaluator.py | 17 ++++++++++++----- mava/systems/sebulba/ppo/ff_ippo.py | 15 ++++++++++----- mava/wrappers/gym.py | 16 +++++++++++----- 3 files changed, 33 insertions(+), 15 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index 984a42377..8412b2d81 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -348,6 +348,7 @@ def get_sebulba_ff_evaluator_fn( env: Environment, apply_fn: ActorApply, config: DictConfig, + np_rng : np.random.Generator, log_win_rate: bool = False, ) -> SebulbaEvalFn: """Get the evaluator function for feedforward networks. @@ -376,8 +377,9 @@ def get_action( # todo explicetly put these on the learner? they should already return action def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: - - obs, info = env.reset() + + seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs) + obs, info = env.reset(seed = seeds) dones = np.full(env.num_envs, False) eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) @@ -417,6 +419,7 @@ def get_sebulba_rnn_evaluator_fn( env: Environment, apply_fn: RecActorApply, config: DictConfig, + np_rng : np.random.Generator, scanned_rnn: nn.Module, log_win_rate: bool = False, ) -> SebulbaEvalFn: @@ -448,7 +451,8 @@ def get_action( # todo explicetly put these on the learner? 
they should already def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: - obs, info = env.reset() + seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs) + obs, info = env.reset(seed = seeds) eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) hstate = scanned_rnn.initialize_carry( @@ -499,6 +503,7 @@ def make_sebulba_eval_fns( eval_env_fn: Callable, network_apply_fn: Union[ActorApply, RecActorApply], config: DictConfig, + np_rng : np.random.Generator, add_global_state: bool = False, use_recurrent_net: bool = False, scanned_rnn: Optional[nn.Module] = None, @@ -533,6 +538,7 @@ def make_sebulba_eval_fns( eval_env, network_apply_fn, # type: ignore config, + np_rng, scanned_rnn, log_win_rate, ) @@ -540,15 +546,16 @@ def make_sebulba_eval_fns( absolute_eval_env, network_apply_fn, # type: ignore config, + np_rng, scanned_rnn, log_win_rate, ) else: evaluator = get_sebulba_ff_evaluator_fn( - eval_env, network_apply_fn, config, log_win_rate # type: ignore + eval_env, network_apply_fn, config, np_rng, log_win_rate # type: ignore ) absolute_metric_evaluator = get_sebulba_ff_evaluator_fn( - absolute_eval_env, network_apply_fn, config, log_win_rate # type: ignore + absolute_eval_env, network_apply_fn, config, np_rng, log_win_rate # type: ignore ) return evaluator, absolute_metric_evaluator diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index a184414d9..ce7fb224c 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -61,6 +61,7 @@ def rollout( apply_fns: Tuple, learner_devices: List, actor_device_id: int, + seeds: List[int], ) -> None: # setup @@ -89,8 +90,7 @@ def get_action_and_value( params_queue_get_time: deque = deque(maxlen=1) rollout_time: deque = deque(maxlen=1) rollout_queue_put_time: deque = deque(maxlen=1) - - next_obs, info = env.reset() + next_obs, info = env.reset(seed=seeds) next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) move_to_device = lambda x: jax.device_put(x, device=current_actor_device) @@ -586,11 +586,13 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 (key, actor_net_key, critic_net_key), config, learner_devices ) + # Generate Numpy RNG for reproducibility + np_rng = np.random.default_rng(config.system.seed) + # Setup evaluator. - # One key per device for evaluation. evaluator, absolute_metric_evaluator = make_eval_fns( - environments.make_gym_env, apply_fns[0], config - ) # todo: make this more generic + environments.make_gym_env, apply_fns[0], config, np_rng + ) # Calculate total timesteps. 
config = sebulba_check_total_timesteps(config) @@ -632,6 +634,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) params_queues: List = [] rollout_queues: List = [] + for _d_idx, d_id in enumerate( # Loop through each executor device config.arch.executor_device_ids ): @@ -639,6 +642,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 device_params = jax.device_put(unreplicated_params, devices[d_id]) # Loop through each executor thread for _thread_id in range(config.arch.n_threads_per_executor): + seeds = np_rng.integers(np.iinfo(np.int64).max, size=config.arch.num_envs) params_queues.append(queue.Queue(maxsize=1)) rollout_queues.append(queue.Queue(maxsize=1)) params_queues[-1].put(device_params) @@ -652,6 +656,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 apply_fns, learner_devices, d_id, + seeds, ), ).start() diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index b5f89b45f..d1c36cd54 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -49,7 +49,9 @@ def __init__( self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[0].n - def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[np.ndarray, Dict]: if seed is not None: self.env.seed(seed) @@ -96,10 +98,12 @@ def __init__(self, env: gym.Env): self.running_count_episode_return = 0.0 self.running_count_episode_length = 0.0 - def reset(self) -> Tuple: + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[np.ndarray, Dict]: # Reset the env - agents_view, info = self._env.reset() + agents_view, info = self._env.reset(seed, options) # Create the metrics dict metrics = { @@ -160,9 +164,11 @@ def __init__(self, env: gym.Env): ] * self.env.num_agents self.observation_space = spaces.Tuple(_observation_boxs) - def reset(self) -> Tuple[np.ndarray, Dict]: + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[np.ndarray, Dict]: """Reset the environment.""" - obs, info = self.env.reset() + obs, info = self.env.reset(seed, options) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, info From 0a1ffd0314a87bd799c84bcc0c8578212699e236 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 15:28:25 +0100 Subject: [PATCH 041/139] chore: pre-commits --- mava/evaluator.py | 12 ++++++------ mava/systems/sebulba/ppo/ff_ippo.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index 8412b2d81..bacbb050e 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -348,7 +348,7 @@ def get_sebulba_ff_evaluator_fn( env: Environment, apply_fn: ActorApply, config: DictConfig, - np_rng : np.random.Generator, + np_rng: np.random.Generator, log_win_rate: bool = False, ) -> SebulbaEvalFn: """Get the evaluator function for feedforward networks. @@ -377,9 +377,9 @@ def get_action( # todo explicetly put these on the learner? 
they should already return action def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: - + seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs) - obs, info = env.reset(seed = seeds) + obs, info = env.reset(seed=seeds) dones = np.full(env.num_envs, False) eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) @@ -419,7 +419,7 @@ def get_sebulba_rnn_evaluator_fn( env: Environment, apply_fn: RecActorApply, config: DictConfig, - np_rng : np.random.Generator, + np_rng: np.random.Generator, scanned_rnn: nn.Module, log_win_rate: bool = False, ) -> SebulbaEvalFn: @@ -452,7 +452,7 @@ def get_action( # todo explicetly put these on the learner? they should already def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs) - obs, info = env.reset(seed = seeds) + obs, info = env.reset(seed=seeds) eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) hstate = scanned_rnn.initialize_carry( @@ -503,7 +503,7 @@ def make_sebulba_eval_fns( eval_env_fn: Callable, network_apply_fn: Union[ActorApply, RecActorApply], config: DictConfig, - np_rng : np.random.Generator, + np_rng: np.random.Generator, add_global_state: bool = False, use_recurrent_net: bool = False, scanned_rnn: Optional[nn.Module] = None, diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index ce7fb224c..0f1abb206 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -588,11 +588,11 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 # Generate Numpy RNG for reproducibility np_rng = np.random.default_rng(config.system.seed) - + # Setup evaluator. evaluator, absolute_metric_evaluator = make_eval_fns( environments.make_gym_env, apply_fns[0], config, np_rng - ) + ) # Calculate total timesteps. config = sebulba_check_total_timesteps(config) @@ -634,7 +634,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) params_queues: List = [] rollout_queues: List = [] - + for _d_idx, d_id in enumerate( # Loop through each executor device config.arch.executor_device_ids ): From f1adc3109009f86ccd965e794e7dc9f01f45f375 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 15:30:59 +0100 Subject: [PATCH 042/139] chore: pre-commits --- mava/systems/anakin/ppo/rec_mappo.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/anakin/ppo/rec_mappo.py index c2f9dc678..93736cf10 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/anakin/ppo/rec_mappo.py @@ -726,7 +726,9 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 return eval_performance -@hydra.main(config_path="../../../configs", config_name="default_rec_mappo.yaml", version_base="1.2") +@hydra.main( + config_path="../../../configs", config_name="default_rec_mappo.yaml", version_base="1.2" +) def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. 
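
The reproducibility changes above thread per-rollout reset seeds, drawn from a single numpy Generator built from the experiment seed, through to every Sebulba actor thread via its own parameter and rollout queues. A simplified, self-contained sketch of that wiring; the real rollout function, parameters and queue payloads are far richer, and the names below are placeholders:

    import queue
    import threading

    import numpy as np

    def rollout(rollout_q, params_q, seeds):
        # Placeholder for the real rollout(): wait for params, then ship a batch back.
        params = params_q.get()
        rollout_q.put((params, seeds))

    np_rng = np.random.default_rng(42)  # config.system.seed in the real code
    params_queues, rollout_queues = [], []
    for _device_id in [0]:               # config.arch.executor_device_ids
        for _thread_id in range(2):      # config.arch.n_threads_per_executor
            seeds = np_rng.integers(np.iinfo(np.int64).max, size=4)  # one seed per env
            params_queues.append(queue.Queue(maxsize=1))
            rollout_queues.append(queue.Queue(maxsize=1))
            params_queues[-1].put("initial-params")  # stand-in for the device params
            threading.Thread(
                target=rollout, args=(rollout_queues[-1], params_queues[-1], seeds)
            ).start()
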
From 3850591b05af82569329dc4cf0eb358df11a8d7e Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 15:41:13 +0100 Subject: [PATCH 043/139] feat: LBF and reproducibility --- mava/utils/make_env.py | 3 +- mava/wrappers/__init__.py | 1 + mava/wrappers/gym.py | 75 ++++++++++++++++++++++++++++++++--- requirements/requirements.txt | 1 + 4 files changed, 73 insertions(+), 7 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 5ee4e697c..9828573e0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -45,6 +45,7 @@ ConnectorWrapper, GigastepWrapper, GymAgentIDWrapper, + GymLBFWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, LbfWrapper, @@ -71,7 +72,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": GymRwareWrapper} +_gym_registry = {"RobotWarehouse": GymRwareWrapper, "LevelBasedForaging": GymLBFWrapper} def add_extra_wrappers( diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index ee8fdf186..869e78053 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -17,6 +17,7 @@ from mava.wrappers.gigastep import GigastepWrapper from mava.wrappers.gym import ( GymAgentIDWrapper, + GymLBFWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, _multiagent_worker_shared_memory, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 978ad4033..a9bc5af8e 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -36,7 +36,6 @@ def __init__( add_global_state: bool = False, ): """Initialize the gym wrapper - Args: env (gym.env): gym env instance. use_individual_rewards (bool, optional): Use individual or group rewards. @@ -50,7 +49,9 @@ def __init__( self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[0].n - def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[np.ndarray, Dict]: if seed is not None: self.env.seed(seed) @@ -88,6 +89,64 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) +class GymLBFWrapper(gym.Wrapper): + """Wrapper for rware gym environments""" + + def __init__( + self, + env: gym.Env, + use_individual_rewards: bool = False, + add_global_state: bool = False, + ): + """Initialize the gym wrapper + Args: + env (gym.env): gym env instance. + use_individual_rewards (bool, optional): Use individual or group rewards. + Defaults to False. + add_global_state (bool, optional) : Create global observations. Defaults to False. + """ + super().__init__(env) + self._env = env # not having _env leaded tp self.env getting replaced --> circular called + self.use_individual_rewards = use_individual_rewards + self.add_global_state = add_global_state # todo : add the global observations + self.num_agents = len(self._env.action_space) + self.num_actions = self._env.action_space[ + 0 + ].n # todo: all the agents must have the same num_actions, add assertion? 
+ + def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: + + if seed is not None: + self.env.seed(seed) + + agents_view, info = self._env.reset() + + info = {"actions_mask": self.get_actions_mask(info)} + + return np.array(agents_view), info + + def step(self, actions: NDArray) -> Tuple: # Vect auto rest + + agents_view, reward, terminated, truncated, info = self._env.step(actions) + + info = {"actions_mask": self.get_actions_mask(info)} + + if self.use_individual_rewards: + reward = np.array(reward) + else: + reward = np.array([np.array(reward).sum()] * self.num_agents) + + truncated = [truncated] * self.num_agents + terminated = [terminated] * self.num_agents + + return agents_view, reward, terminated, truncated, info + + def get_actions_mask(self, info: Dict) -> NDArray: + if "action_mask" in info: + return np.array(info["action_mask"]) + return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + + class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" @@ -97,10 +156,12 @@ def __init__(self, env: gym.Env): self.running_count_episode_return = 0.0 self.running_count_episode_length = 0.0 - def reset(self) -> Tuple: + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[np.ndarray, Dict]: # Reset the env - agents_view, info = self._env.reset() + agents_view, info = self._env.reset(seed, options) # Create the metrics dict metrics = { @@ -161,9 +222,11 @@ def __init__(self, env: gym.Env): ] * self.env.num_agents self.observation_space = spaces.Tuple(_observation_boxs) - def reset(self) -> Tuple[np.ndarray, Dict]: + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[np.ndarray, Dict]: """Reset the environment.""" - obs, info = self.env.reset() + obs, info = self.env.reset(seed, options) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, info diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 3b3bc4c58..3a7b96aef 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -9,6 +9,7 @@ jax jaxlib jaxmarl jumanji @ git+https://github.com/sash-a/jumanji +lbforaging @ git+https://github.com/Louay-Ben-nessir/lb-foraging.git matrax @ git+https://github.com/instadeepai/matrax mujoco==3.1.3 mujoco-mjx==3.1.3 From 0a2ee084bfb5b46f7035d48f05a3fb8297b42be8 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 16 Jul 2024 15:45:51 +0100 Subject: [PATCH 044/139] feat : lbf --- mava/utils/make_env.py | 7 +++-- mava/wrappers/__init__.py | 1 + mava/wrappers/gym.py | 58 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 2330674f0..9828573e0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -45,6 +45,7 @@ ConnectorWrapper, GigastepWrapper, GymAgentIDWrapper, + GymLBFWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, LbfWrapper, @@ -71,7 +72,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": GymRwareWrapper} +_gym_registry = {"RobotWarehouse": GymRwareWrapper, "LevelBasedForaging": GymLBFWrapper} def add_extra_wrappers( @@ -218,12 +219,12 @@ def make_gym_env( Create a Gym environment. Args: - env_name (str): The name of the environment to create. config (Dict): The configuration of the environment. + num_env (int) : The number of parallel envs to create. 
add_global_state (bool): Whether to add the global state to the observation. Default False. Returns: - A tuple of the environments. + Async environments. """ base_env_name = config.env.env_name wrapper = _gym_registry[base_env_name] diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index ee8fdf186..869e78053 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -17,6 +17,7 @@ from mava.wrappers.gigastep import GigastepWrapper from mava.wrappers.gym import ( GymAgentIDWrapper, + GymLBFWrapper, GymRecordEpisodeMetrics, GymRwareWrapper, _multiagent_worker_shared_memory, diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index d1c36cd54..a9bc5af8e 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -89,6 +89,64 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) +class GymLBFWrapper(gym.Wrapper): + """Wrapper for rware gym environments""" + + def __init__( + self, + env: gym.Env, + use_individual_rewards: bool = False, + add_global_state: bool = False, + ): + """Initialize the gym wrapper + Args: + env (gym.env): gym env instance. + use_individual_rewards (bool, optional): Use individual or group rewards. + Defaults to False. + add_global_state (bool, optional) : Create global observations. Defaults to False. + """ + super().__init__(env) + self._env = env # not having _env leaded tp self.env getting replaced --> circular called + self.use_individual_rewards = use_individual_rewards + self.add_global_state = add_global_state # todo : add the global observations + self.num_agents = len(self._env.action_space) + self.num_actions = self._env.action_space[ + 0 + ].n # todo: all the agents must have the same num_actions, add assertion? + + def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: + + if seed is not None: + self.env.seed(seed) + + agents_view, info = self._env.reset() + + info = {"actions_mask": self.get_actions_mask(info)} + + return np.array(agents_view), info + + def step(self, actions: NDArray) -> Tuple: # Vect auto rest + + agents_view, reward, terminated, truncated, info = self._env.step(actions) + + info = {"actions_mask": self.get_actions_mask(info)} + + if self.use_individual_rewards: + reward = np.array(reward) + else: + reward = np.array([np.array(reward).sum()] * self.num_agents) + + truncated = [truncated] * self.num_agents + terminated = [terminated] * self.num_agents + + return agents_view, reward, terminated, truncated, info + + def get_actions_mask(self, info: Dict) -> NDArray: + if "action_mask" in info: + return np.array(info["action_mask"]) + return np.ones((self.num_agents, self.num_actions), dtype=np.float32) + + class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" From dc9206564c5b4b4c155b1e956abfc872be617ca6 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 09:35:25 +0100 Subject: [PATCH 045/139] fix: sync neptune logging for sebulba to avoid stalling --- mava/configs/arch/anakin.yaml | 2 +- mava/configs/arch/sebulba.yaml | 4 ++-- mava/utils/logger.py | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mava/configs/arch/anakin.yaml b/mava/configs/arch/anakin.yaml index 86e75898b..d58d85286 100644 --- a/mava/configs/arch/anakin.yaml +++ b/mava/configs/arch/anakin.yaml @@ -1,5 +1,5 @@ # --- Anakin config --- - +arch_name: "Anakin" # --- Training --- num_envs: 16 # Number of vectorised environments per device. 
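
The logger change in this patch (the mava/utils/logger.py hunk just below) opens Neptune runs in synchronous mode for Sebulba, presumably because the heavily threaded Sebulba actors interact badly with Neptune's asynchronous background logging and the run stalls, as the commit message notes. A hedged sketch of the selection logic, mirroring that hunk and assuming the neptune client's init_run API:

    import neptune

    def make_neptune_run(cfg):
        # Sebulba runs many Python threads, so log synchronously; Anakin keeps async.
        mode = "sync" if cfg.arch.arch_name == "Sebulba" else "async"
        return neptune.init_run(
            project=cfg.logger.kwargs.neptune_project,
            tags=list(cfg.logger.kwargs.neptune_tag),
            mode=mode,
        )
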
diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index b6a0a9699..e0305e2dc 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,5 +1,5 @@ # --- Sebulba config --- -arch_name: "sebulba" +arch_name: "Sebulba" num_envs: 32 # number of envs per thread # --- Evaluation --- @@ -12,6 +12,6 @@ absolute_metric: True # Whether the absolute metric should be computed. For more # on the absolute metric please see: https://arxiv.org/abs/2209.10485 # --- Sebulba devices config --- -n_threads_per_executor: 1 # num of different threads/env batches per actor +n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices diff --git a/mava/utils/logger.py b/mava/utils/logger.py index 8273e44a2..dc217f263 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -150,8 +150,9 @@ class NeptuneLogger(BaseLogger): def __init__(self, cfg: DictConfig, unique_token: str) -> None: tags = list(cfg.logger.kwargs.neptune_tag) project = cfg.logger.kwargs.neptune_project + mode = "sync" if cfg.arch.arch_name == "Sebulba" else "async" - self.logger = neptune.init_run(project=project, tags=tags) + self.logger = neptune.init_run(project=project, tags=tags, mode=mode) self.logger["config"] = stringify_unsupported(cfg) self.detailed_logging = cfg.logger.kwargs.detailed_neptune_logging From 133a25060151ccd99b0f0fe1a73af48310dbbbff Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 09:54:45 +0100 Subject: [PATCH 046/139] fix: added missing lbf import --- mava/utils/make_env.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 9828573e0..eeebed9d0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -20,6 +20,7 @@ import gym.wrappers.compatibility import jaxmarl import jumanji +import lbforaging # noqa: F401 used implicitly import matrax from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario From b938c831b7c5217f6e9f898d3c564ac45510c10a Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 10:11:09 +0100 Subject: [PATCH 047/139] fix: seeds need to python arrays not np arrays --- mava/evaluator.py | 4 ++-- mava/systems/sebulba/ppo/ff_ippo.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index bacbb050e..fb611d1b3 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -378,7 +378,7 @@ def get_action( # todo explicetly put these on the learner? they should already def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: - seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs) + seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs).tolist() obs, info = env.reset(seed=seeds) dones = np.full(env.num_envs, False) eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) @@ -451,7 +451,7 @@ def get_action( # todo explicetly put these on the learner? 
they should already def eval_episodes(params: FrozenDict, key: chex.PRNGKey) -> Any: - seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs) + seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs).tolist() obs, info = env.reset(seed=seeds) eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/sebulba/ppo/ff_ippo.py index 0f1abb206..42d2732ae 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/sebulba/ppo/ff_ippo.py @@ -642,7 +642,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 device_params = jax.device_put(unreplicated_params, devices[d_id]) # Loop through each executor thread for _thread_id in range(config.arch.n_threads_per_executor): - seeds = np_rng.integers(np.iinfo(np.int64).max, size=config.arch.num_envs) + seeds = np_rng.integers(np.iinfo(np.int64).max, size=config.arch.num_envs).tolist() params_queues.append(queue.Queue(maxsize=1)) rollout_queues.append(queue.Queue(maxsize=1)) params_queues[-1].put(device_params) From a36847680413642c634d214095fb4eab0ad5dcae Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 12:40:51 +0100 Subject: [PATCH 048/139] fix: config and imports for anakin q_learning and sac --- mava/systems/anakin/q_learning/rec_iql.py | 4 ++-- mava/systems/anakin/sac/ff_isac.py | 4 ++-- mava/systems/anakin/sac/ff_masac.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mava/systems/anakin/q_learning/rec_iql.py b/mava/systems/anakin/q_learning/rec_iql.py index 6be8e61a4..89139277a 100644 --- a/mava/systems/anakin/q_learning/rec_iql.py +++ b/mava/systems/anakin/q_learning/rec_iql.py @@ -34,7 +34,7 @@ from mava.evaluator import make_eval_fns from mava.networks import RecQNetwork, ScannedRNN -from mava.systems.q_learning.types import ( +from mava.systems.anakin.q_learning.types import ( ActionSelectionState, ActionState, LearnerState, @@ -645,7 +645,7 @@ def run_experiment(cfg: DictConfig) -> float: return float(eval_performance) -@hydra.main(config_path="../../configs", config_name="default_rec_iql.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_rec_iql.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. diff --git a/mava/systems/anakin/sac/ff_isac.py b/mava/systems/anakin/sac/ff_isac.py index 2c33028d1..1642176f3 100644 --- a/mava/systems/anakin/sac/ff_isac.py +++ b/mava/systems/anakin/sac/ff_isac.py @@ -34,7 +34,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardQNet as QNetwork -from mava.systems.sac.types import ( +from mava.systems.anakin.sac.types import ( BufferState, LearnerState, Metrics, @@ -607,7 +607,7 @@ def run_experiment(cfg: DictConfig) -> float: return eval_performance -@hydra.main(config_path="../../configs", config_name="default_ff_isac.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_isac.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. 
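
The `.tolist()` calls added in the seeding fix above exist because, per the commit message, the vectorised env reset wants a plain Python list of ints rather than a numpy array, while `np.random.Generator.integers` returns numpy int64 values. A tiny sketch of the conversion:

    import numpy as np

    np_rng = np.random.default_rng(0)
    seeds = np_rng.integers(np.iinfo(np.int64).max, size=8).tolist()
    assert all(isinstance(s, int) for s in seeds)  # plain Python ints, one per env
    # envs.reset(seed=seeds) would then seed each sub-environment deterministically.
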
diff --git a/mava/systems/anakin/sac/ff_masac.py b/mava/systems/anakin/sac/ff_masac.py index 4401906ee..2367a67a4 100644 --- a/mava/systems/anakin/sac/ff_masac.py +++ b/mava/systems/anakin/sac/ff_masac.py @@ -34,7 +34,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardQNet as QNetwork -from mava.systems.sac.types import ( +from mava.systems.anakin.sac.types import ( BufferState, LearnerState, Metrics, @@ -626,7 +626,7 @@ def run_experiment(cfg: DictConfig) -> float: return eval_performance -@hydra.main(config_path="../../configs", config_name="default_ff_masac.yaml", version_base="1.2") +@hydra.main(config_path="../../../configs", config_name="default_ff_masac.yaml", version_base="1.2") def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. From 32433ff2d93aee917f9a9504ff8d19d94be33fb1 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 14:17:38 +0100 Subject: [PATCH 049/139] chore: arch_name for anakin --- mava/configs/arch/anakin.yaml | 1 + mava/configs/arch/sebulba.yaml | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mava/configs/arch/anakin.yaml b/mava/configs/arch/anakin.yaml index 86e75898b..6e15238dc 100644 --- a/mava/configs/arch/anakin.yaml +++ b/mava/configs/arch/anakin.yaml @@ -1,4 +1,5 @@ # --- Anakin config --- +arch_name: "Anakin" # --- Training --- num_envs: 16 # Number of vectorised environments per device. diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index b6a0a9699..f38324e86 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,6 +1,8 @@ # --- Sebulba config --- -arch_name: "sebulba" -num_envs: 32 # number of envs per thread +arch_name: "Sebulba" + +# --- Training --- +num_envs: 32 # number of environments per thread. # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. 
If True the policy will select From a68c8e944c9e118eba10acbd3655332d0d935c24 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 14:18:56 +0100 Subject: [PATCH 050/139] fix: sum the rewards when using a shared reward --- mava/wrappers/gym.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index a9bc5af8e..83c523702 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -75,7 +75,7 @@ def step(self, actions: NDArray) -> Tuple: if self.use_individual_rewards: reward = np.array(reward) else: - reward = np.array([np.array(reward).mean()] * self.num_agents) + reward = np.array([np.array(reward).sum()] * self.num_agents) return agents_view, reward, terminated, truncated, info From 8cee7ac0dc5c9b3d927062f0951a8b3e100173e6 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 15:50:11 +0100 Subject: [PATCH 051/139] fix: configs revamp --- mava/configs/env/gym.yaml | 24 ++++++++++--------- .../configs/env/scenario/gym-10x10-3p-3f.yaml | 15 ++++++++++++ .../configs/env/scenario/gym-15x15-3p-5f.yaml | 15 ++++++++++++ .../configs/env/scenario/gym-15x15-4p-3f.yaml | 15 ++++++++++++ .../configs/env/scenario/gym-15x15-4p-5f.yaml | 15 ++++++++++++ .../env/scenario/gym-2s-10x10-3p-3f.yaml | 15 ++++++++++++ .../env/scenario/gym-2s-8x8-2p-2f-coop.yaml | 15 ++++++++++++ .../env/scenario/gym-8x8-2p-2f-coop.yaml | 15 ++++++++++++ mava/configs/env/scenario/gym-small-4ag.yaml | 14 +++++++++++ mava/configs/env/scenario/gym-tiny-2ag.yaml | 14 +++++++++++ .../env/scenario/gym-tiny-4ag-easy.yaml | 14 +++++++++++ mava/configs/env/scenario/gym-tiny-4ag.yaml | 14 +++++++++++ mava/utils/make_env.py | 23 +++++++++--------- mava/wrappers/gym.py | 24 +++++++++---------- 14 files changed, 198 insertions(+), 34 deletions(-) create mode 100644 mava/configs/env/scenario/gym-10x10-3p-3f.yaml create mode 100644 mava/configs/env/scenario/gym-15x15-3p-5f.yaml create mode 100644 mava/configs/env/scenario/gym-15x15-4p-3f.yaml create mode 100644 mava/configs/env/scenario/gym-15x15-4p-5f.yaml create mode 100644 mava/configs/env/scenario/gym-2s-10x10-3p-3f.yaml create mode 100644 mava/configs/env/scenario/gym-2s-8x8-2p-2f-coop.yaml create mode 100644 mava/configs/env/scenario/gym-8x8-2p-2f-coop.yaml create mode 100644 mava/configs/env/scenario/gym-small-4ag.yaml create mode 100644 mava/configs/env/scenario/gym-tiny-2ag.yaml create mode 100644 mava/configs/env/scenario/gym-tiny-4ag-easy.yaml create mode 100644 mava/configs/env/scenario/gym-tiny-4ag.yaml diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml index 1e197a45e..295b9974e 100644 --- a/mava/configs/env/gym.yaml +++ b/mava/configs/env/gym.yaml @@ -1,22 +1,24 @@ # ---Environment Configs--- +scenario: gym-2s-8x8-2p-2f-coop copy -scenario: rware:rware-tiny-2ag-v1 # [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] - -env_name: RobotWarehouse # Used for logging purposes. +env_name: Gym # Used for logging purposes, will get changed to the scenario name at runtime. # Defines the metric that will be used to evaluate the performance of the agent. # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. eval_metric: episode_return -# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. -# This should not be changed. -implicit_agent_id: False -# Whether or not to log the winrate of this environment. This should not be changed as not all -# environments have a winrate metric. 
+# Whether the add agents IDs to the observations returned by the environment. +add_agent_id : False + +# Whether or not to log the winrate of this environment. log_win_rate: False -# Weather or not to average the returned rewards over all of the agents. -use_individual_rewards: True +# Weather or not to sum the returned rewards over all of the agents. +use_shared_rewards: True kwargs: - time_limit: 500 + {} + +# Possible scenarios: +# RobotWarehouse : [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] +# LevelBasedForaging : [2s-8x8-2p-2f-coop, 8x8-2p-2f-coop, 2s-10x10-3p-3f, 10x10-3p-3f, 15x15-3p-5f, 15x15-4p-3f, 15x15-4p-5f] \ No newline at end of file diff --git a/mava/configs/env/scenario/gym-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-10x10-3p-3f.yaml new file mode 100644 index 000000000..386431be4 --- /dev/null +++ b/mava/configs/env/scenario/gym-10x10-3p-3f.yaml @@ -0,0 +1,15 @@ +# The config of the 10x10-3p-3f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 10x10-3p-3f + +task_config: + field_size: [10,10] + sight: 10 + num_agents: 3 + max_food: 3 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-15x15-3p-5f.yaml new file mode 100644 index 000000000..1a8380511 --- /dev/null +++ b/mava/configs/env/scenario/gym-15x15-3p-5f.yaml @@ -0,0 +1,15 @@ +# The config of the 15x15-3p-5f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 15x15-3p-5f + +task_config: + field_size: [15, 15] + sight: 15 + num_agents: 3 + max_food: 5 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-15x15-4p-3f.yaml new file mode 100644 index 000000000..fa22f737b --- /dev/null +++ b/mava/configs/env/scenario/gym-15x15-4p-3f.yaml @@ -0,0 +1,15 @@ +# The config of the 15x15-4p-3f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 15x15-4p-3f + +task_config: + field_size: [15, 15] + sight: 15 + num_agents: 4 + max_food: 3 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-15x15-4p-5f.yaml new file mode 100644 index 000000000..28937215c --- /dev/null +++ b/mava/configs/env/scenario/gym-15x15-4p-5f.yaml @@ -0,0 +1,15 @@ +# The config of the 15x15-4p-5f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 15x15-4p-5f + +task_config: + field_size: [15, 15] + sight: 15 + num_agents: 4 + max_food: 5 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-2s-10x10-3p-3f.yaml new file mode 100644 index 000000000..f0262eb8d --- /dev/null +++ b/mava/configs/env/scenario/gym-2s-10x10-3p-3f.yaml @@ -0,0 +1,15 @@ +# The config of the 2s10x10-3p-3f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 2s-10x10-3p-3f + +task_config: + field_size: [10, 10] + sight: 2 + num_agents: 3 + max_food: 3 + max_player_level: 2 + force_coop: False + 
max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-2s-8x8-2p-2f-coop.yaml new file mode 100644 index 000000000..ffdc5be0e --- /dev/null +++ b/mava/configs/env/scenario/gym-2s-8x8-2p-2f-coop.yaml @@ -0,0 +1,15 @@ +# The config of the 2s-8x8-2p-2f-coop scenario with the VectorObserver set as default. +name: LevelBasedForaging +task_name: 2s-8x8-2p-2f-coop + +task_config: + field_size: [8, 8] # size of the grid to generate. + sight: 2 # field of view of an agent. + num_agents: 2 # number of agents on the grid. + max_food: 2 # number of food in the environment. + max_player_level: 2 # maximum level of the agents (inclusive). + force_coop: True # force cooperation between agents. + max_episode_steps: 50 # max number of steps per episode. + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-8x8-2p-2f-coop.yaml new file mode 100644 index 000000000..52519fecb --- /dev/null +++ b/mava/configs/env/scenario/gym-8x8-2p-2f-coop.yaml @@ -0,0 +1,15 @@ +# The config of the 8x8-2p-2f-coop scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 8x8-2p-2f-coop + +task_config: + field_size: [8, 8] + sight: 8 + num_agents: 2 + max_food: 2 + max_player_level: 2 + force_coop: True + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-small-4ag.yaml b/mava/configs/env/scenario/gym-small-4ag.yaml new file mode 100644 index 000000000..af3eb830b --- /dev/null +++ b/mava/configs/env/scenario/gym-small-4ag.yaml @@ -0,0 +1,14 @@ +# The config of the small-4ag environment +name: RobotWarehouse +task_name: small-4ag + +task_config: + column_height: 8 + shelf_rows: 2 + shelf_columns: 3 + n_agents: 4 + sensor_range: 1 + request_queue_size: 4 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-tiny-2ag.yaml b/mava/configs/env/scenario/gym-tiny-2ag.yaml new file mode 100644 index 000000000..e648887a0 --- /dev/null +++ b/mava/configs/env/scenario/gym-tiny-2ag.yaml @@ -0,0 +1,14 @@ +# The config of the tiny-2ag environment +name: RobotWarehouse +task_name: tiny-2ag + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + n_agents: 2 + sensor_range: 1 + request_queue_size: 2 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-tiny-4ag-easy.yaml b/mava/configs/env/scenario/gym-tiny-4ag-easy.yaml new file mode 100644 index 000000000..7d8840882 --- /dev/null +++ b/mava/configs/env/scenario/gym-tiny-4ag-easy.yaml @@ -0,0 +1,14 @@ +# The config of the tiny-4ag-easy environment +name: RobotWarehouse +task_name: tiny-4ag-easy + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + n_agents: 4 + sensor_range: 1 + request_queue_size: 8 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-tiny-4ag.yaml b/mava/configs/env/scenario/gym-tiny-4ag.yaml new file mode 100644 index 000000000..dbfe55bd4 --- /dev/null +++ b/mava/configs/env/scenario/gym-tiny-4ag.yaml @@ -0,0 +1,14 @@ +# The config of the tiny_4ag environment +name: RobotWarehouse +task_name: tiny-4ag + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + 
n_agents: 4 + sensor_range: 1 + request_queue_size: 4 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index eeebed9d0..3f851fa76 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -20,7 +20,8 @@ import gym.wrappers.compatibility import jaxmarl import jumanji -import lbforaging # noqa: F401 used implicitly +from lbforaging.foraging import environment as GymLBF +import rware.warehouse as GymRware import matrax from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario @@ -73,7 +74,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": GymRwareWrapper, "LevelBasedForaging": GymLBFWrapper} +_gym_registry = {"RobotWarehouse": (GymRware, GymRwareWrapper), "LevelBasedForaging": (GymLBF ,GymLBFWrapper)} def add_extra_wrappers( @@ -215,7 +216,7 @@ def make_gym_env( config: DictConfig, num_env: int, add_global_state: bool = False, -) -> Environment: # todo : create the appropriate annotation for the sync vector +) -> gym.vector.AsyncVectorEnv: """ Create a Gym environment. @@ -227,20 +228,20 @@ def make_gym_env( Returns: Async environments. """ - base_env_name = config.env.env_name - wrapper = _gym_registry[base_env_name] + base_env_name = config.env.scenario.name + env_maker, wrapper = _gym_registry[base_env_name] def create_gym_env( config: DictConfig, add_global_state: bool = False - ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. - env = gym.make(config.env.scenario) - wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state) - if not config.env.implicit_agent_id: - wrapped_env = GymAgentIDWrapper(wrapped_env) # todo : add agent id wrapper for gym . + ) -> Environment: + env = env_maker(**config.env.scenario.task_config) + wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) + if config.env.add_agent_id: + wrapped_env = GymAgentIDWrapper(wrapped_env) wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env - envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names + envs = gym.vector.AsyncVectorEnv( [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], worker=_multiagent_worker_shared_memory, ) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 83c523702..8112a087e 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -32,19 +32,19 @@ class GymRwareWrapper(gym.Wrapper): def __init__( self, env: gym.Env, - use_individual_rewards: bool = False, + use_shared_rewards: bool = False, add_global_state: bool = False, ): """Initialize the gym wrapper Args: env (gym.env): gym env instance. - use_individual_rewards (bool, optional): Use individual or group rewards. + use_shared_rewards (bool, optional): Use individual or shared rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. 
""" super().__init__(env) self._env = env - self.use_individual_rewards = use_individual_rewards + self.use_shared_rewards = use_shared_rewards self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[0].n @@ -72,10 +72,10 @@ def step(self, actions: NDArray) -> Tuple: if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) - if self.use_individual_rewards: - reward = np.array(reward) - else: + if self.use_shared_rewards: reward = np.array([np.array(reward).sum()] * self.num_agents) + else: + reward = np.array(reward) return agents_view, reward, terminated, truncated, info @@ -95,19 +95,19 @@ class GymLBFWrapper(gym.Wrapper): def __init__( self, env: gym.Env, - use_individual_rewards: bool = False, + use_shared_rewards: bool = False, add_global_state: bool = False, ): """Initialize the gym wrapper Args: env (gym.env): gym env instance. - use_individual_rewards (bool, optional): Use individual or group rewards. + use_shared_rewards (bool, optional): Use individual or shared rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. """ super().__init__(env) self._env = env # not having _env leaded tp self.env getting replaced --> circular called - self.use_individual_rewards = use_individual_rewards + self.use_shared_rewards = use_shared_rewards self.add_global_state = add_global_state # todo : add the global observations self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[ @@ -131,10 +131,10 @@ def step(self, actions: NDArray) -> Tuple: # Vect auto rest info = {"actions_mask": self.get_actions_mask(info)} - if self.use_individual_rewards: - reward = np.array(reward) - else: + if self.use_shared_rewards: reward = np.array([np.array(reward).sum()] * self.num_agents) + else: + reward = np.array(reward) truncated = [truncated] * self.num_agents terminated = [terminated] * self.num_agents From e199f3a19b50990735f9740388639fb0ec5d36f5 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 15:52:50 +0100 Subject: [PATCH 052/139] chore: pre-commits --- mava/configs/env/gym.yaml | 2 +- mava/utils/make_env.py | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml index 295b9974e..2ee6f9256 100644 --- a/mava/configs/env/gym.yaml +++ b/mava/configs/env/gym.yaml @@ -21,4 +21,4 @@ kwargs: # Possible scenarios: # RobotWarehouse : [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] -# LevelBasedForaging : [2s-8x8-2p-2f-coop, 8x8-2p-2f-coop, 2s-10x10-3p-3f, 10x10-3p-3f, 15x15-3p-5f, 15x15-4p-3f, 15x15-4p-5f] \ No newline at end of file +# LevelBasedForaging : [2s-8x8-2p-2f-coop, 8x8-2p-2f-coop, 2s-10x10-3p-3f, 10x10-3p-3f, 15x15-3p-5f, 15x15-4p-3f, 15x15-4p-5f] diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 3f851fa76..9d89ab581 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -20,9 +20,8 @@ import gym.wrappers.compatibility import jaxmarl import jumanji -from lbforaging.foraging import environment as GymLBF -import rware.warehouse as GymRware import matrax +import rware.warehouse as gym_rware from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment @@ -38,6 +37,7 @@ from jumanji.environments.routing.robot_warehouse.generator import ( RandomGenerator as RwareRandomGenerator, ) +from lbforaging.foraging import 
environment as gym_lbf from omegaconf import DictConfig from mava.wrappers import ( @@ -74,7 +74,10 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": (GymRware, GymRwareWrapper), "LevelBasedForaging": (GymLBF ,GymLBFWrapper)} +_gym_registry = { + "RobotWarehouse": (gym_rware, GymRwareWrapper), + "LevelBasedForaging": (gym_lbf, GymLBFWrapper), +} def add_extra_wrappers( @@ -216,7 +219,7 @@ def make_gym_env( config: DictConfig, num_env: int, add_global_state: bool = False, -) -> gym.vector.AsyncVectorEnv: +) -> gym.vector.AsyncVectorEnv: """ Create a Gym environment. @@ -231,17 +234,15 @@ def make_gym_env( base_env_name = config.env.scenario.name env_maker, wrapper = _gym_registry[base_env_name] - def create_gym_env( - config: DictConfig, add_global_state: bool = False - ) -> Environment: + def create_gym_env(config: DictConfig, add_global_state: bool = False) -> Environment: env = env_maker(**config.env.scenario.task_config) wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) if config.env.add_agent_id: - wrapped_env = GymAgentIDWrapper(wrapped_env) + wrapped_env = GymAgentIDWrapper(wrapped_env) wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env - envs = gym.vector.AsyncVectorEnv( + envs = gym.vector.AsyncVectorEnv( [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], worker=_multiagent_worker_shared_memory, ) From 2b71d3b32652c34c6666b10266a184ba6dac17c2 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 16:18:55 +0100 Subject: [PATCH 053/139] fix: more config changes --- mava/configs/arch/anakin.yaml | 2 +- mava/configs/arch/sebulba.yaml | 2 +- mava/configs/default_ff_ippo.yaml | 2 +- mava/configs/env/{gym.yaml => gym_lbf.yaml} | 8 ++----- mava/configs/env/rware_gym.yaml | 20 ++++++++++++++++++ mava/wrappers/gym.py | 23 +++++++++++++-------- 6 files changed, 39 insertions(+), 18 deletions(-) rename mava/configs/env/{gym.yaml => gym_lbf.yaml} (60%) create mode 100644 mava/configs/env/rware_gym.yaml diff --git a/mava/configs/arch/anakin.yaml b/mava/configs/arch/anakin.yaml index 6e15238dc..d6414f5ac 100644 --- a/mava/configs/arch/anakin.yaml +++ b/mava/configs/arch/anakin.yaml @@ -1,5 +1,5 @@ # --- Anakin config --- -arch_name: "Anakin" +arch_name: anakin # --- Training --- num_envs: 16 # Number of vectorised environments per device. diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index f38324e86..0ff3707cd 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,5 +1,5 @@ # --- Sebulba config --- -arch_name: "Sebulba" +arch_name: sebulba # --- Training --- num_envs: 32 # number of environments per thread. 
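
The revamped make_gym_env above looks the scenario's base name up in _gym_registry to get an (environment constructor, Mava wrapper) pair, then splats the scenario's task_config straight into the constructor before wrapping. A self-contained sketch of that lookup-and-splat pattern with stand-in names; the real constructors come from the rware and lbforaging packages:

    # `make_lbf` and `identity_wrapper` are stand-ins; only the pattern is the point.
    task_config = {
        "field_size": [8, 8], "sight": 2, "num_agents": 2, "max_food": 2,
        "max_player_level": 2, "force_coop": True, "max_episode_steps": 50,
    }

    def make_lbf(**kwargs):
        return f"ForagingEnv({kwargs['num_agents']} agents)"

    def identity_wrapper(env, use_shared_rewards, add_global_state):
        return env

    _gym_registry = {"LevelBasedForaging": (make_lbf, identity_wrapper)}

    env_maker, wrapper = _gym_registry["LevelBasedForaging"]
    env = wrapper(env_maker(**task_config), True, False)
    print(env)  # -> ForagingEnv(2 agents)
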
diff --git a/mava/configs/default_ff_ippo.yaml b/mava/configs/default_ff_ippo.yaml index d942584ce..c4aa6ea49 100644 --- a/mava/configs/default_ff_ippo.yaml +++ b/mava/configs/default_ff_ippo.yaml @@ -3,5 +3,5 @@ defaults: - arch: anakin - system: ppo/ff_ippo - network: mlp - - env: rware + - env: rware_gym - _self_ diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym_lbf.yaml similarity index 60% rename from mava/configs/env/gym.yaml rename to mava/configs/env/gym_lbf.yaml index 2ee6f9256..dfabeb888 100644 --- a/mava/configs/env/gym.yaml +++ b/mava/configs/env/gym_lbf.yaml @@ -1,7 +1,7 @@ # ---Environment Configs--- -scenario: gym-2s-8x8-2p-2f-coop copy +scenario: gym-2s-8x8-2p-2f-coop copy # [gym-2s-8x8-2p-2f-coop, gym-8x8-2p-2f-coop, gym-2s-10x10-3p-3f, gym-10x10-3p-3f, gym-15x15-3p-5f, gym-15x15-4p-3f, gym-15x15-4p-5f] -env_name: Gym # Used for logging purposes, will get changed to the scenario name at runtime. +env_name: LevelBasedForaging # Used for logging purposes. # Defines the metric that will be used to evaluate the performance of the agent. # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. @@ -18,7 +18,3 @@ use_shared_rewards: True kwargs: {} - -# Possible scenarios: -# RobotWarehouse : [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] -# LevelBasedForaging : [2s-8x8-2p-2f-coop, 8x8-2p-2f-coop, 2s-10x10-3p-3f, 10x10-3p-3f, 15x15-3p-5f, 15x15-4p-3f, 15x15-4p-5f] diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml new file mode 100644 index 000000000..a61bc734e --- /dev/null +++ b/mava/configs/env/rware_gym.yaml @@ -0,0 +1,20 @@ +# ---Environment Configs--- +scenario: gym-2s-8x8-2p-2f-coop # [gym-tiny-2ag, gym-tiny-4ag, gym-tiny-4ag-easy, gym-small-4ag] + +env_name: RobotWarehouse # Used for logging purposes. + +# Defines the metric that will be used to evaluate the performance of the agent. +# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. +eval_metric: episode_return + +# Whether the add agents IDs to the observations returned by the environment. +add_agent_id : False + +# Whether or not to log the winrate of this environment. +log_win_rate: False + +# Weather or not to sum the returned rewards over all of the agents. +use_shared_rewards: True + +kwargs: + {} diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 8112a087e..396f78ef4 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -32,10 +32,10 @@ class GymRwareWrapper(gym.Wrapper): def __init__( self, env: gym.Env, - use_shared_rewards: bool = False, + use_shared_rewards: bool = True, add_global_state: bool = False, ): - """Initialize the gym wrapper + """Initialise the gym wrapper Args: env (gym.env): gym env instance. use_shared_rewards (bool, optional): Use individual or shared rewards. @@ -95,10 +95,10 @@ class GymLBFWrapper(gym.Wrapper): def __init__( self, env: gym.Env, - use_shared_rewards: bool = False, + use_shared_rewards: bool = True, add_global_state: bool = False, ): - """Initialize the gym wrapper + """Initialise the gym wrapper Args: env (gym.env): gym env instance. use_shared_rewards (bool, optional): Use individual or shared rewards. @@ -106,13 +106,13 @@ def __init__( add_global_state (bool, optional) : Create global observations. Defaults to False. 
""" super().__init__(env) - self._env = env # not having _env leaded tp self.env getting replaced --> circular called + self._env = env self.use_shared_rewards = use_shared_rewards - self.add_global_state = add_global_state # todo : add the global observations + self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[ 0 - ].n # todo: all the agents must have the same num_actions, add assertion? + ].n def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: @@ -130,7 +130,9 @@ def step(self, actions: NDArray) -> Tuple: # Vect auto rest agents_view, reward, terminated, truncated, info = self._env.step(actions) info = {"actions_mask": self.get_actions_mask(info)} - + if self.add_global_state: + info["global_obs"] = self.get_global_obs(agents_view) + if self.use_shared_rewards: reward = np.array([np.array(reward).sum()] * self.num_agents) else: @@ -145,7 +147,10 @@ def get_actions_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - + + def get_global_obs(self, obs: NDArray) -> NDArray: + global_obs = np.concatenate(obs, axis=0) + return np.tile(global_obs, (self.num_agents, 1)) class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" From e87ad286cb87fde7c40fde4f5c83ca5692e714d7 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Wed, 17 Jul 2024 16:20:37 +0100 Subject: [PATCH 054/139] chore: pre-commits --- mava/wrappers/gym.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 396f78ef4..13975a9a5 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -106,13 +106,11 @@ def __init__( add_global_state (bool, optional) : Create global observations. Defaults to False. 
""" super().__init__(env) - self._env = env + self._env = env self.use_shared_rewards = use_shared_rewards - self.add_global_state = add_global_state + self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) - self.num_actions = self._env.action_space[ - 0 - ].n + self.num_actions = self._env.action_space[0].n def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: @@ -132,7 +130,7 @@ def step(self, actions: NDArray) -> Tuple: # Vect auto rest info = {"actions_mask": self.get_actions_mask(info)} if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) - + if self.use_shared_rewards: reward = np.array([np.array(reward).sum()] * self.num_agents) else: @@ -147,11 +145,12 @@ def get_actions_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - + def get_global_obs(self, obs: NDArray) -> NDArray: global_obs = np.concatenate(obs, axis=0) return np.tile(global_obs, (self.num_agents, 1)) + class GymRecordEpisodeMetrics(gym.Wrapper): """Record the episode returns and lengths.""" From 2b587c05626bf469dbf499d2c86b6b414152ba0c Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 10:24:20 +0100 Subject: [PATCH 055/139] chore: renamed arch_name to architecture_name --- mava/configs/arch/anakin.yaml | 2 +- mava/configs/arch/sebulba.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mava/configs/arch/anakin.yaml b/mava/configs/arch/anakin.yaml index d6414f5ac..eb948b7a1 100644 --- a/mava/configs/arch/anakin.yaml +++ b/mava/configs/arch/anakin.yaml @@ -1,5 +1,5 @@ # --- Anakin config --- -arch_name: anakin +architecture_name: anakin # --- Training --- num_envs: 16 # Number of vectorised environments per device. diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 0ff3707cd..0b539059b 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,5 +1,5 @@ # --- Sebulba config --- -arch_name: sebulba +architecture_name: sebulba # --- Training --- num_envs: 32 # number of environments per thread. 
From 5ad4d2fa5e6962826a70e7da24f2ad9db515a09d Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 10:30:39 +0100 Subject: [PATCH 056/139] chore: config files rename --- mava/configs/env/{gym_lbf.yaml => lbf_gym.yaml} | 7 ++----- mava/configs/env/rware_gym.yaml | 7 ++----- .../{gym-10x10-3p-3f.yaml => gym-lbf-10x10-3p-3f.yaml} | 0 .../{gym-15x15-3p-5f.yaml => gym-lbf-15x15-3p-5f.yaml} | 0 .../{gym-15x15-4p-3f.yaml => gym-lbf-15x15-4p-3f.yaml} | 0 .../{gym-15x15-4p-5f.yaml => gym-lbf-15x15-4p-5f.yaml} | 0 ...gym-2s-10x10-3p-3f.yaml => gym-lbf-2s-10x10-3p-3f.yaml} | 0 ...-8x8-2p-2f-coop.yaml => gym-lbf-2s-8x8-2p-2f-coop.yaml} | 0 ...gym-8x8-2p-2f-coop.yaml => gym-lbf-8x8-2p-2f-coop.yaml} | 0 .../{gym-small-4ag.yaml => gym-rware-small-4ag.yaml} | 0 .../{gym-tiny-2ag.yaml => gym-rware-tiny-2ag.yaml} | 0 ...gym-tiny-4ag-easy.yaml => gym-rware-tiny-4ag-easy.yaml} | 0 .../{gym-tiny-4ag.yaml => gym-rware-tiny-4ag.yaml} | 0 13 files changed, 4 insertions(+), 10 deletions(-) rename mava/configs/env/{gym_lbf.yaml => lbf_gym.yaml} (70%) rename mava/configs/env/scenario/{gym-10x10-3p-3f.yaml => gym-lbf-10x10-3p-3f.yaml} (100%) rename mava/configs/env/scenario/{gym-15x15-3p-5f.yaml => gym-lbf-15x15-3p-5f.yaml} (100%) rename mava/configs/env/scenario/{gym-15x15-4p-3f.yaml => gym-lbf-15x15-4p-3f.yaml} (100%) rename mava/configs/env/scenario/{gym-15x15-4p-5f.yaml => gym-lbf-15x15-4p-5f.yaml} (100%) rename mava/configs/env/scenario/{gym-2s-10x10-3p-3f.yaml => gym-lbf-2s-10x10-3p-3f.yaml} (100%) rename mava/configs/env/scenario/{gym-2s-8x8-2p-2f-coop.yaml => gym-lbf-2s-8x8-2p-2f-coop.yaml} (100%) rename mava/configs/env/scenario/{gym-8x8-2p-2f-coop.yaml => gym-lbf-8x8-2p-2f-coop.yaml} (100%) rename mava/configs/env/scenario/{gym-small-4ag.yaml => gym-rware-small-4ag.yaml} (100%) rename mava/configs/env/scenario/{gym-tiny-2ag.yaml => gym-rware-tiny-2ag.yaml} (100%) rename mava/configs/env/scenario/{gym-tiny-4ag-easy.yaml => gym-rware-tiny-4ag-easy.yaml} (100%) rename mava/configs/env/scenario/{gym-tiny-4ag.yaml => gym-rware-tiny-4ag.yaml} (100%) diff --git a/mava/configs/env/gym_lbf.yaml b/mava/configs/env/lbf_gym.yaml similarity index 70% rename from mava/configs/env/gym_lbf.yaml rename to mava/configs/env/lbf_gym.yaml index dfabeb888..3fca4d62d 100644 --- a/mava/configs/env/gym_lbf.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -1,5 +1,5 @@ # ---Environment Configs--- -scenario: gym-2s-8x8-2p-2f-coop copy # [gym-2s-8x8-2p-2f-coop, gym-8x8-2p-2f-coop, gym-2s-10x10-3p-3f, gym-10x10-3p-3f, gym-15x15-3p-5f, gym-15x15-4p-3f, gym-15x15-4p-5f] +scenario: gym-2s-8x8-2p-2f-coop copy # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] env_name: LevelBasedForaging # Used for logging purposes. @@ -14,7 +14,4 @@ add_agent_id : False log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. 
-use_shared_rewards: True - -kwargs: - {} +use_shared_rewards: True \ No newline at end of file diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index a61bc734e..576bf0d2b 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -1,5 +1,5 @@ # ---Environment Configs--- -scenario: gym-2s-8x8-2p-2f-coop # [gym-tiny-2ag, gym-tiny-4ag, gym-tiny-4ag-easy, gym-small-4ag] +scenario: gym-2s-8x8-2p-2f-coop # [gym-rware-tiny-2ag, gym-rware-tiny-4ag, gym-rware-tiny-4ag-easy, gym-rware-small-4ag] env_name: RobotWarehouse # Used for logging purposes. @@ -14,7 +14,4 @@ add_agent_id : False log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. -use_shared_rewards: True - -kwargs: - {} +use_shared_rewards: True \ No newline at end of file diff --git a/mava/configs/env/scenario/gym-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml similarity index 100% rename from mava/configs/env/scenario/gym-10x10-3p-3f.yaml rename to mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml diff --git a/mava/configs/env/scenario/gym-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml similarity index 100% rename from mava/configs/env/scenario/gym-15x15-3p-5f.yaml rename to mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml diff --git a/mava/configs/env/scenario/gym-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml similarity index 100% rename from mava/configs/env/scenario/gym-15x15-4p-3f.yaml rename to mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml diff --git a/mava/configs/env/scenario/gym-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml similarity index 100% rename from mava/configs/env/scenario/gym-15x15-4p-5f.yaml rename to mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml diff --git a/mava/configs/env/scenario/gym-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml similarity index 100% rename from mava/configs/env/scenario/gym-2s-10x10-3p-3f.yaml rename to mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml diff --git a/mava/configs/env/scenario/gym-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml similarity index 100% rename from mava/configs/env/scenario/gym-2s-8x8-2p-2f-coop.yaml rename to mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml diff --git a/mava/configs/env/scenario/gym-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml similarity index 100% rename from mava/configs/env/scenario/gym-8x8-2p-2f-coop.yaml rename to mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml diff --git a/mava/configs/env/scenario/gym-small-4ag.yaml b/mava/configs/env/scenario/gym-rware-small-4ag.yaml similarity index 100% rename from mava/configs/env/scenario/gym-small-4ag.yaml rename to mava/configs/env/scenario/gym-rware-small-4ag.yaml diff --git a/mava/configs/env/scenario/gym-tiny-2ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml similarity index 100% rename from mava/configs/env/scenario/gym-tiny-2ag.yaml rename to mava/configs/env/scenario/gym-rware-tiny-2ag.yaml diff --git a/mava/configs/env/scenario/gym-tiny-4ag-easy.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml similarity index 100% rename from mava/configs/env/scenario/gym-tiny-4ag-easy.yaml rename to mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml diff --git a/mava/configs/env/scenario/gym-tiny-4ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml similarity index 
100% rename from mava/configs/env/scenario/gym-tiny-4ag.yaml rename to mava/configs/env/scenario/gym-rware-tiny-4ag.yaml From 432071e9476aadf2342ea0f571fd0d4b30edc7cd Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 10:40:55 +0100 Subject: [PATCH 057/139] fix; moved from gym to gymnasium --- mava/configs/env/lbf_gym.yaml | 2 +- mava/configs/env/rware_gym.yaml | 2 +- mava/utils/make_env.py | 14 +++++++------- mava/wrappers/gym.py | 28 ++++++++++++++-------------- requirements/requirements.txt | 3 ++- 5 files changed, 25 insertions(+), 24 deletions(-) diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index 3fca4d62d..0c6016dd4 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -14,4 +14,4 @@ add_agent_id : False log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. -use_shared_rewards: True \ No newline at end of file +use_shared_rewards: True diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index 576bf0d2b..4d5e0c7f3 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -14,4 +14,4 @@ add_agent_id : False log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. -use_shared_rewards: True \ No newline at end of file +use_shared_rewards: True diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 9d89ab581..dcab4216a 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -14,10 +14,10 @@ from typing import Tuple -import gym -import gym.vector -import gym.wrappers -import gym.wrappers.compatibility +import gymnasium +import gymnasium.vector +import gymnasium.wrappers +import gymnasium.wrappers.compatibility import jaxmarl import jumanji import matrax @@ -219,9 +219,9 @@ def make_gym_env( config: DictConfig, num_env: int, add_global_state: bool = False, -) -> gym.vector.AsyncVectorEnv: +) -> gymnasium.vector.AsyncVectorEnv: """ - Create a Gym environment. + Create a gymnasium environment. Args: config (Dict): The configuration of the environment. @@ -242,7 +242,7 @@ def create_gym_env(config: DictConfig, add_global_state: bool = False) -> Enviro wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env - envs = gym.vector.AsyncVectorEnv( + envs = gymnasium.vector.AsyncVectorEnv( [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], worker=_multiagent_worker_shared_memory, ) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 13975a9a5..5b8f9cd74 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -16,28 +16,28 @@ import warnings from typing import Any, Callable, Dict, Optional, Tuple -import gym +import gymnasium import numpy as np -from gym import spaces -from gym.vector.utils import write_to_shared_memory +from gymnasium import spaces +from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray # Filter out the warnings -warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") +warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") -class GymRwareWrapper(gym.Wrapper): +class GymRwareWrapper(gymnasium.Wrapper): """Wrapper for rware gym environments.""" def __init__( self, - env: gym.Env, + env: gymnasium.Env, use_shared_rewards: bool = True, add_global_state: bool = False, ): """Initialise the gym wrapper Args: - env (gym.env): gym env instance. + env (gymnasium.env): gymnasium env instance. 
use_shared_rewards (bool, optional): Use individual or shared rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. @@ -89,18 +89,18 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) -class GymLBFWrapper(gym.Wrapper): +class GymLBFWrapper(gymnasium.Wrapper): """Wrapper for rware gym environments""" def __init__( self, - env: gym.Env, + env: gymnasium.Env, use_shared_rewards: bool = True, add_global_state: bool = False, ): """Initialise the gym wrapper Args: - env (gym.env): gym env instance. + env (gymnasium.env): gymnasium env instance. use_shared_rewards (bool, optional): Use individual or shared rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. @@ -151,10 +151,10 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) -class GymRecordEpisodeMetrics(gym.Wrapper): +class GymRecordEpisodeMetrics(gymnasium.Wrapper): """Record the episode returns and lengths.""" - def __init__(self, env: gym.Env): + def __init__(self, env: gymnasium.Env): super().__init__(env) self._env = env self.running_count_episode_return = 0.0 @@ -206,10 +206,10 @@ def step(self, actions: NDArray) -> Tuple: return agents_view, reward, terminated, truncated, info -class GymAgentIDWrapper(gym.Wrapper): +class GymAgentIDWrapper(gymnasium.Wrapper): """Add one hot agent IDs to observation.""" - def __init__(self, env: gym.Env): + def __init__(self, env: gymnasium.Env): super().__init__(env) self.agent_ids = np.eye(self.env.num_agents) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 3a7b96aef..74b07af25 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -3,13 +3,14 @@ distrax @ git+https://github.com/google-deepmind/distrax # distrax release does flashbax~=0.1.0 flax gigastep @ git+https://github.com/mlech26l/gigastep +gymnasium hydra-core==1.3.2 id-marl-eval @ git+https://github.com/instadeepai/marl-eval jax jaxlib jaxmarl jumanji @ git+https://github.com/sash-a/jumanji -lbforaging @ git+https://github.com/Louay-Ben-nessir/lb-foraging.git +lbforaging @ git+https://github.com/LukasSchaefer/lb-foraging.git@gymnasium_integration matrax @ git+https://github.com/instadeepai/matrax mujoco==3.1.3 mujoco-mjx==3.1.3 From 77e6e126e73e02ce5ad62105b08372a28edda699 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 10:44:37 +0100 Subject: [PATCH 058/139] feat: generic gym wrapper --- mava/utils/make_env.py | 4 +-- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 51 +++++++-------------------------------- 3 files changed, 12 insertions(+), 45 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index dcab4216a..a2dd6ef54 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -49,7 +49,7 @@ GymAgentIDWrapper, GymLBFWrapper, GymRecordEpisodeMetrics, - GymRwareWrapper, + GymWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -75,7 +75,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} _gym_registry = { - "RobotWarehouse": (gym_rware, GymRwareWrapper), + "RobotWarehouse": (gym_rware, GymWrapper), "LevelBasedForaging": (gym_lbf, GymLBFWrapper), } diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 869e78053..03e2223dc 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -19,7 +19,7 @@ GymAgentIDWrapper, GymLBFWrapper, GymRecordEpisodeMetrics, - 
GymRwareWrapper, + GymWrapper, _multiagent_worker_shared_memory, ) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 5b8f9cd74..49dbafd1f 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -26,8 +26,8 @@ warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") -class GymRwareWrapper(gymnasium.Wrapper): - """Wrapper for rware gym environments.""" +class GymWrapper(gymnasium.Wrapper): + """Wrapper for gym environments.""" def __init__( self, @@ -89,7 +89,7 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) -class GymLBFWrapper(gymnasium.Wrapper): +class GymLBFWrapper(GymWrapper): """Wrapper for rware gym environments""" def __init__( @@ -105,50 +105,17 @@ def __init__( Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. """ - super().__init__(env) - self._env = env - self.use_shared_rewards = use_shared_rewards - self.add_global_state = add_global_state - self.num_agents = len(self._env.action_space) - self.num_actions = self._env.action_space[0].n - - def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: - - if seed is not None: - self.env.seed(seed) + super().__init__(env, use_shared_rewards, add_global_state) - agents_view, info = self._env.reset() + def step(self, actions: NDArray) -> Tuple: - info = {"actions_mask": self.get_actions_mask(info)} - - return np.array(agents_view), info - - def step(self, actions: NDArray) -> Tuple: # Vect auto rest - - agents_view, reward, terminated, truncated, info = self._env.step(actions) - - info = {"actions_mask": self.get_actions_mask(info)} - if self.add_global_state: - info["global_obs"] = self.get_global_obs(agents_view) - - if self.use_shared_rewards: - reward = np.array([np.array(reward).sum()] * self.num_agents) - else: - reward = np.array(reward) - - truncated = [truncated] * self.num_agents - terminated = [terminated] * self.num_agents + agents_view, reward, terminated, truncated, info = super().step(actions) + truncated = np.repeat(truncated, self.num_agents) + terminated = np.repeat(terminated, self.num_agents) + return agents_view, reward, terminated, truncated, info - def get_actions_mask(self, info: Dict) -> NDArray: - if "action_mask" in info: - return np.array(info["action_mask"]) - return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - - def get_global_obs(self, obs: NDArray) -> NDArray: - global_obs = np.concatenate(obs, axis=0) - return np.tile(global_obs, (self.num_agents, 1)) class GymRecordEpisodeMetrics(gymnasium.Wrapper): From 43511fd31ec2e39f9f304493cd8f4c6710c97078 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 10:50:15 +0100 Subject: [PATCH 059/139] feat: using gymnasium async worker --- mava/utils/make_env.py | 4 +- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 109 +++++++++++++++++++++++--------------- 3 files changed, 69 insertions(+), 46 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index a2dd6ef54..26197a289 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -56,7 +56,7 @@ RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, - _multiagent_worker_shared_memory, + async_multiagent_worker, ) # Registry mapping environment names to their generator and wrapper classes. 
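The custom worker imported above replaces gymnasium's stock AsyncVectorEnv worker because the done signals here are per-agent arrays rather than scalars: a sub-environment is only flagged for autoreset (carried out on the following step call) once every agent is terminated or truncated. A small NumPy sketch of that check, with made-up flags:

import numpy as np

terminated = np.array([True, False, True])   # per-agent termination flags
truncated = np.array([False, False, False])  # per-agent truncation flags

# async_multiagent_worker only schedules a reset once all agents are done.
autoreset = np.logical_or(terminated, truncated).all()  # False for these flags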
@@ -244,7 +244,7 @@ def create_gym_env(config: DictConfig, add_global_state: bool = False) -> Enviro envs = gymnasium.vector.AsyncVectorEnv( [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], - worker=_multiagent_worker_shared_memory, + worker=async_multiagent_worker, ) return envs diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 03e2223dc..80cbccc52 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -20,7 +20,7 @@ GymLBFWrapper, GymRecordEpisodeMetrics, GymWrapper, - _multiagent_worker_shared_memory, + async_multiagent_worker, ) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 49dbafd1f..3fec9f47e 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -22,6 +22,13 @@ from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray +import multiprocessing +import sys +import traceback +from copy import deepcopy +from multiprocessing import Queue +from multiprocessing.connection import Connection + # Filter out the warnings warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") @@ -208,76 +215,92 @@ def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: return obs, reward, terminated, truncated, info -# Copied form https://github.com/openai/gym/blob/master/gym/vector/async_vector_env.py +# Copied form https://github.com/Farama-Foundation/Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents -def _multiagent_worker_shared_memory( # noqa: CCR001 +def async_multiagent_worker( index: int, - env_fn: Callable[[], Any], - pipe: Any, - parent_pipe: Any, - shared_memory: Any, - error_queue: Any, -) -> None: - assert shared_memory is not None + env_fn: callable, + pipe: Connection, + parent_pipe: Connection, + shared_memory: multiprocessing.Array | dict[str, Any] | tuple[Any, ...], + error_queue: Queue, +): env = env_fn() observation_space = env.observation_space + action_space = env.action_space + autoreset = False + parent_pipe.close() + try: while True: command, data = pipe.recv() + if command == "reset": observation, info = env.reset(**data) - write_to_shared_memory(observation_space, index, observation, shared_memory) - pipe.send(((None, info), True)) - + if shared_memory: + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + observation = None + autoreset = False + pipe.send(((observation, info), True)) elif command == "step": - ( - observation, - reward, - terminated, - truncated, - info, - ) = env.step(data) - # Handel the dones across all of envs and agents - if np.logical_or(terminated, truncated).all(): - old_observation, old_info = observation, info + if autoreset: observation, info = env.reset() - info["final_observation"] = old_observation - info["final_info"] = old_info - write_to_shared_memory(observation_space, index, observation, shared_memory) - pipe.send(((None, reward, terminated, truncated, info), True)) - elif command == "seed": - env.seed(data) - pipe.send((None, True)) + reward, terminated, truncated = 0, False, False + else: + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + autoreset = np.logical_or(terminated, truncated).all() + + if shared_memory: + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + observation = None + + pipe.send(((observation, reward, terminated, 
truncated, info), True)) elif command == "close": pipe.send((None, True)) break elif command == "_call": name, args, kwargs = data - if name in ["reset", "step", "seed", "close"]: + if name in ["reset", "step", "close", "_setattr", "_check_spaces"]: raise ValueError( - f"Trying to call function `{name}` with " - f"`_call`. Use `{name}` directly instead." + f"Trying to call function `{name}` with `call`, use `{name}` directly instead." ) - function = getattr(env, name) - if callable(function): - pipe.send((function(*args, **kwargs), True)) + + attr = env.get_wrapper_attr(name) + if callable(attr): + pipe.send((attr(*args, **kwargs), True)) else: - pipe.send((function, True)) + pipe.send((attr, True)) elif command == "_setattr": name, value = data - setattr(env, name, value) + env.set_wrapper_attr(name, value) pipe.send((None, True)) elif command == "_check_spaces": - pipe.send(((data[0] == observation_space, data[1] == env.action_space), True)) + pipe.send( + ( + (data[0] == observation_space, data[1] == action_space), + True, + ) + ) else: raise RuntimeError( - f"Received unknown command `{command}`. Must " - "be one of {`reset`, `step`, `seed`, `close`, `_call`, " - "`_setattr`, `_check_spaces`}." + f"Received unknown command `{command}`. Must be one of [`reset`, `step`, `close`, `_call`, `_setattr`, `_check_spaces`]." ) except (KeyboardInterrupt, Exception): - error_queue.put((index,) + sys.exc_info()[:2]) + error_type, error_message, _ = sys.exc_info() + trace = traceback.format_exc() + + error_queue.put((index, error_type, error_message, trace)) pipe.send((None, False)) finally: - env.close() + env.close() \ No newline at end of file From eaf9a1cb380abb807fc39796ab03f83bc304637b Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 10:58:58 +0100 Subject: [PATCH 060/139] chore: pre-commits and annotaions --- mava/wrappers/gym.py | 55 +++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 3fec9f47e..556fba094 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -13,8 +13,11 @@ # limitations under the License. 
import sys +import traceback import warnings -from typing import Any, Callable, Dict, Optional, Tuple +from multiprocessing import Queue +from multiprocessing.connection import Connection +from typing import Any, Callable, Dict, Optional, Tuple, Union import gymnasium import numpy as np @@ -22,13 +25,6 @@ from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray -import multiprocessing -import sys -import traceback -from copy import deepcopy -from multiprocessing import Queue -from multiprocessing.connection import Connection - # Filter out the warnings warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") @@ -58,7 +54,7 @@ def __init__( def reset( self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple[np.ndarray, Dict]: + ) -> Tuple[NDArray, Dict]: if seed is not None: self.env.seed(seed) @@ -71,7 +67,7 @@ def reset( return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: + def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) @@ -97,7 +93,7 @@ def get_global_obs(self, obs: NDArray) -> NDArray: class GymLBFWrapper(GymWrapper): - """Wrapper for rware gym environments""" + """Wrapper for LBF gym environments""" def __init__( self, @@ -114,15 +110,14 @@ def __init__( """ super().__init__(env, use_shared_rewards, add_global_state) - def step(self, actions: NDArray) -> Tuple: + def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = super().step(actions) truncated = np.repeat(truncated, self.num_agents) terminated = np.repeat(terminated, self.num_agents) - - return agents_view, reward, terminated, truncated, info + return agents_view, reward, terminated, truncated, info class GymRecordEpisodeMetrics(gymnasium.Wrapper): @@ -136,7 +131,7 @@ def __init__(self, env: gymnasium.Env): def reset( self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple[np.ndarray, Dict]: + ) -> Tuple[NDArray, Dict]: # Reset the env agents_view, info = self._env.reset(seed, options) @@ -202,29 +197,29 @@ def __init__(self, env: gymnasium.Env): def reset( self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple[np.ndarray, Dict]: + ) -> Tuple[NDArray, Dict]: """Reset the environment.""" obs, info = self.env.reset(seed, options) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, info - def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: + def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]: """Step the environment.""" obs, reward, terminated, truncated, info = self.env.step(action) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, reward, terminated, truncated, info -# Copied form https://github.com/Farama-Foundation/Gymnasium/blob/main/gymnasium/vector/async_vector_env.py +# Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents -def async_multiagent_worker( +def async_multiagent_worker( # noqa CCR001 index: int, - env_fn: callable, + env_fn: Callable, pipe: Connection, parent_pipe: Connection, - shared_memory: multiprocessing.Array | dict[str, Any] | tuple[Any, ...], + shared_memory: Union[NDArray, dict[str, Any], tuple[Any, ...]], error_queue: Queue, -): +) -> None: env = env_fn() observation_space = env.observation_space action_space = env.action_space @@ -239,9 
+234,7 @@ def async_multiagent_worker( if command == "reset": observation, info = env.reset(**data) if shared_memory: - write_to_shared_memory( - observation_space, index, observation, shared_memory - ) + write_to_shared_memory(observation_space, index, observation, shared_memory) observation = None autoreset = False pipe.send(((observation, info), True)) @@ -260,9 +253,7 @@ def async_multiagent_worker( autoreset = np.logical_or(terminated, truncated).all() if shared_memory: - write_to_shared_memory( - observation_space, index, observation, shared_memory - ) + write_to_shared_memory(observation_space, index, observation, shared_memory) observation = None pipe.send(((observation, reward, terminated, truncated, info), True)) @@ -273,7 +264,8 @@ def async_multiagent_worker( name, args, kwargs = data if name in ["reset", "step", "close", "_setattr", "_check_spaces"]: raise ValueError( - f"Trying to call function `{name}` with `call`, use `{name}` directly instead." + f"Trying to call function `{name}` with \ + `call`, use `{name}` directly instead." ) attr = env.get_wrapper_attr(name) @@ -294,7 +286,8 @@ def async_multiagent_worker( ) else: raise RuntimeError( - f"Received unknown command `{command}`. Must be one of [`reset`, `step`, `close`, `_call`, `_setattr`, `_check_spaces`]." + f"Received unknown command `{command}`. Must be one of \ + [`reset`, `step`, `close`, `_call`, `_setattr`, `_check_spaces`]." ) except (KeyboardInterrupt, Exception): error_type, error_message, _ = sys.exc_info() @@ -303,4 +296,4 @@ def async_multiagent_worker( error_queue.put((index, error_type, error_message, trace)) pipe.send((None, False)) finally: - env.close() \ No newline at end of file + env.close() From 16c0ac3645ed66c519c71b16fa8dd4f2092c9d08 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 14:22:27 +0100 Subject: [PATCH 061/139] fix: config file fixes --- mava/configs/env/lbf_gym.yaml | 4 +++- mava/configs/env/rware_gym.yaml | 4 +++- mava/configs/env/scenario/gym-rware-small-4ag.yaml | 4 ++++ mava/configs/env/scenario/gym-rware-tiny-2ag.yaml | 4 ++++ mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml | 4 ++++ mava/configs/env/scenario/gym-rware-tiny-4ag.yaml | 4 ++++ 6 files changed, 22 insertions(+), 2 deletions(-) diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index 0c6016dd4..6981f3492 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -1,5 +1,7 @@ # ---Environment Configs--- -scenario: gym-2s-8x8-2p-2f-coop copy # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] +defaults: + - _self_ + - scenario: gym-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] env_name: LevelBasedForaging # Used for logging purposes. diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index 4d5e0c7f3..87bd3a473 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -1,5 +1,7 @@ # ---Environment Configs--- -scenario: gym-2s-8x8-2p-2f-coop # [gym-rware-tiny-2ag, gym-rware-tiny-4ag, gym-rware-tiny-4ag-easy, gym-rware-small-4ag] +defaults: + - _self_ + - scenario: gym-rware-tiny-2ag # [gym-rware-tiny-2ag, gym-rware-tiny-4ag, gym-rware-tiny-4ag-easy, gym-rware-small-4ag] env_name: RobotWarehouse # Used for logging purposes. 
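With the defaults list above, the scenario becomes a Hydra config group, so it can be swapped from the command line instead of editing YAML. A rough sketch of how such a composed config might be consumed (the decorator arguments and the override below assume the repository's mava/configs layout and Hydra >= 1.2, and should be treated as illustrative rather than the repository's entry point):

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_path="mava/configs", config_name="default_ff_ippo", version_base="1.2")
def main(cfg: DictConfig) -> None:
    # After composition, cfg.env.scenario holds the chosen scenario, e.g.
    # cfg.env.scenario.task_config.n_agents for the rware tasks above.
    print(OmegaConf.to_yaml(cfg.env.scenario))

if __name__ == "__main__":
    main()

# Example CLI override:
#   python train.py env=rware_gym env/scenario=gym-rware-tiny-4ag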
diff --git a/mava/configs/env/scenario/gym-rware-small-4ag.yaml b/mava/configs/env/scenario/gym-rware-small-4ag.yaml index af3eb830b..39f8efa4e 100644 --- a/mava/configs/env/scenario/gym-rware-small-4ag.yaml +++ b/mava/configs/env/scenario/gym-rware-small-4ag.yaml @@ -9,6 +9,10 @@ task_config: n_agents: 4 sensor_range: 1 request_queue_size: 4 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml index e648887a0..95ef11fc2 100644 --- a/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml +++ b/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml @@ -9,6 +9,10 @@ task_config: n_agents: 2 sensor_range: 1 request_queue_size: 2 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml index 7d8840882..7753b73ec 100644 --- a/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml +++ b/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml @@ -9,6 +9,10 @@ task_config: n_agents: 4 sensor_range: 1 request_queue_size: 8 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml index dbfe55bd4..c28cf92c5 100644 --- a/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml +++ b/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml @@ -9,6 +9,10 @@ task_config: n_agents: 4 sensor_range: 1 request_queue_size: 4 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 env_kwargs: {} # there are no scenario specific env_kwargs for this env From 18b928d22b5b5b2ddaae215c1f5fd8c07821ebe6 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 15:47:06 +0100 Subject: [PATCH 062/139] fix: rware import --- mava/utils/make_env.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 26197a289..95c8ea33f 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -21,7 +21,7 @@ import jaxmarl import jumanji import matrax -import rware.warehouse as gym_rware +from rware.warehouse import Warehouse as gym_rware from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment From 19a776599f0c46dcfbb92fa2275ec4880d54c6b8 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 18:48:45 +0100 Subject: [PATCH 063/139] fix: better agent ids wrapper? 
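The patch below reworks GymAgentIDWrapper so the observation space is resized rather than rebuilt by hand. The core transformation it applies is a one-hot agent-ID concatenation; a minimal NumPy sketch of that step, with illustrative shapes that are not taken from the repository:

import numpy as np

num_agents, obs_dim = 3, 5
agent_ids = np.eye(num_agents, dtype=np.float32)         # (3, 3) one-hot agent IDs
obs = np.zeros((num_agents, obs_dim), dtype=np.float32)  # per-agent observations

# Each agent's observation is prefixed with its own one-hot ID,
# so the per-agent observation length grows from 5 to 3 + 5 = 8.
obs_with_ids = np.concatenate([agent_ids, obs], axis=1)  # (3, 8)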
--- mava/utils/make_env.py | 4 ++-- mava/wrappers/gym.py | 25 ++++++++++++------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 95c8ea33f..e49d6344b 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -21,7 +21,6 @@ import jaxmarl import jumanji import matrax -from rware.warehouse import Warehouse as gym_rware from gigastep import ScenarioBuilder from jaxmarl.environments.smax import map_name_to_scenario from jumanji.env import Environment @@ -39,6 +38,7 @@ ) from lbforaging.foraging import environment as gym_lbf from omegaconf import DictConfig +from rware.warehouse import Warehouse as gym_Warehouse from mava.wrappers import ( AgentIDWrapper, @@ -75,7 +75,7 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} _gym_registry = { - "RobotWarehouse": (gym_rware, GymWrapper), + "RobotWarehouse": (gym_Warehouse, GymWrapper), "LevelBasedForaging": (gym_lbf, GymLBFWrapper), } diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 556fba094..c175dedd7 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -21,7 +21,6 @@ import gymnasium import numpy as np -from gymnasium import spaces from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray @@ -182,18 +181,7 @@ def __init__(self, env: gymnasium.Env): super().__init__(env) self.agent_ids = np.eye(self.env.num_agents) - observation_space = self.env.observation_space[0] - _obs_low, _obs_high, _obs_dtype, _obs_shape = ( - observation_space.low[0], - observation_space.high[0], - observation_space.dtype, - observation_space.shape, - ) - _new_obs_shape = (_obs_shape[0] + self.env.num_agents,) - _observation_boxs = [ - spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype) - ] * self.env.num_agents - self.observation_space = spaces.Tuple(_observation_boxs) + self.observation_space = self.modify_space(self.env.observation_space) def reset( self, seed: Optional[int] = None, options: Optional[dict] = None @@ -209,6 +197,17 @@ def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]: obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, reward, terminated, truncated, info + def modify_space(self, space: gymnasium.spaces) -> gymnasium.spaces: + if isinstance(space, gymnasium.spaces.Box): + new_shape = space.shape[0] + len(self.agent_ids) + return gymnasium.spaces.Box( + low=space.low, high=space.high, shape=new_shape, dtype=space.dtype + ) + elif isinstance(space, gymnasium.spaces.Tuple): + return gymnasium.spaces.Tuple(self.modify_space(s) for s in space) + else: + raise ValueError(f"Space {type(space)} is not currently supported.") + # Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents From c4a05d69effec40cbdbfd33c700b0adeda52f69b Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 18:55:16 +0100 Subject: [PATCH 064/139] chore: bunch of minor changes --- mava/wrappers/gym.py | 29 +++++------------------------ 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index c175dedd7..dcaa6a5ad 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -29,7 +29,10 @@ class GymWrapper(gymnasium.Wrapper): - """Wrapper for gym environments.""" + """Base wrapper for multi-agent gym environments. + This wrapper works out of the box for RobotWarehouse. + See `GymLBFWrapper` for how it can be modified to work for other environments. 
+ """ def __init__( self, @@ -54,7 +57,6 @@ def __init__( def reset( self, seed: Optional[int] = None, options: Optional[dict] = None ) -> Tuple[NDArray, Dict]: - if seed is not None: self.env.seed(seed) @@ -67,7 +69,6 @@ def reset( return np.array(agents_view), info def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: - agents_view, reward, terminated, truncated, info = self._env.step(actions) info = {"actions_mask": self.get_actions_mask(info)} @@ -92,25 +93,9 @@ def get_global_obs(self, obs: NDArray) -> NDArray: class GymLBFWrapper(GymWrapper): - """Wrapper for LBF gym environments""" - - def __init__( - self, - env: gymnasium.Env, - use_shared_rewards: bool = True, - add_global_state: bool = False, - ): - """Initialise the gym wrapper - Args: - env (gymnasium.env): gymnasium env instance. - use_shared_rewards (bool, optional): Use individual or shared rewards. - Defaults to False. - add_global_state (bool, optional) : Create global observations. Defaults to False. - """ - super().__init__(env, use_shared_rewards, add_global_state) + """Wrapper for the gym level based foraging environment.""" def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: - agents_view, reward, terminated, truncated, info = super().step(actions) truncated = np.repeat(truncated, self.num_agents) @@ -131,8 +116,6 @@ def __init__(self, env: gymnasium.Env): def reset( self, seed: Optional[int] = None, options: Optional[dict] = None ) -> Tuple[NDArray, Dict]: - - # Reset the env agents_view, info = self._env.reset(seed, options) # Create the metrics dict @@ -154,8 +137,6 @@ def reset( return agents_view, info def step(self, actions: NDArray) -> Tuple: - - # Step the env agents_view, reward, terminated, truncated, info = self._env.step(actions) self.running_count_episode_return += float(np.mean(reward)) From 559581885bb520cde72fc5a46b4e11f21bec327f Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 18 Jul 2024 19:13:29 +0100 Subject: [PATCH 065/139] chore : annotation --- mava/wrappers/gym.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index dcaa6a5ad..e7576714d 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -136,7 +136,7 @@ def reset( return agents_view, info - def step(self, actions: NDArray) -> Tuple: + def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) self.running_count_episode_return += float(np.mean(reward)) From 29b1303214c29bc3f129b027f6112432e885d662 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 12:05:35 +0100 Subject: [PATCH 066/139] chore: comments --- mava/wrappers/gym.py | 1 + requirements/requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index e7576714d..18d3ede73 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -230,6 +230,7 @@ def async_multiagent_worker( # noqa CCR001 truncated, info, ) = env.step(data) + # The autoreset was modified to work with boolean arrays. 
autoreset = np.logical_or(terminated, truncated).all() if shared_memory: diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 74b07af25..0c68a3ca5 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -10,7 +10,7 @@ jax jaxlib jaxmarl jumanji @ git+https://github.com/sash-a/jumanji -lbforaging @ git+https://github.com/LukasSchaefer/lb-foraging.git@gymnasium_integration +lbforaging @ git+https://github.com/LukasSchaefer/lb-foraging.git@gymnasium_integration # fixes: https://github.com/semitable/lb-foraging/issues/20 matrax @ git+https://github.com/instadeepai/matrax mujoco==3.1.3 mujoco-mjx==3.1.3 @@ -19,7 +19,7 @@ numpy omegaconf optax protobuf~=3.20 -rware @ git+https://github.com/RuanJohn/robotic-warehouse.git +rware @ git+https://github.com/RuanJohn/robotic-warehouse.git # compatibility with latest gymnasium scipy==1.12.0 tensorboard_logger tensorflow_probability From 669dfbd044998fedd961c3fbb0c192d5b07d8fd5 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 13:08:10 +0100 Subject: [PATCH 067/139] feat: restructured the folders --- mava/systems/{anakin => }/ppo/__init__.py | 0 mava/systems/{anakin/ppo => ppo/anakin}/ff_ippo.py | 2 +- mava/systems/{anakin/ppo => ppo/anakin}/ff_mappo.py | 2 +- mava/systems/{anakin/ppo => ppo/anakin}/rec_ippo.py | 2 +- mava/systems/{anakin/ppo => ppo/anakin}/rec_mappo.py | 2 +- mava/systems/{sebulba/ppo => ppo/sebulba}/ff_ippo.py | 0 mava/systems/{anakin => }/ppo/types.py | 0 mava/systems/{anakin => }/q_learning/__init__.py | 0 .../systems/{anakin/q_learning => q_learning/anakin}/rec_iql.py | 2 +- mava/systems/{anakin => }/q_learning/types.py | 0 mava/systems/{anakin => }/sac/__init__.py | 0 mava/systems/{anakin/sac => sac/anakin}/ff_isac.py | 2 +- mava/systems/{anakin/sac => sac/anakin}/ff_masac.py | 2 +- mava/systems/{anakin => }/sac/types.py | 0 mava/utils/checkpointing.py | 2 +- 15 files changed, 8 insertions(+), 8 deletions(-) rename mava/systems/{anakin => }/ppo/__init__.py (100%) rename mava/systems/{anakin/ppo => ppo/anakin}/ff_ippo.py (99%) rename mava/systems/{anakin/ppo => ppo/anakin}/ff_mappo.py (99%) rename mava/systems/{anakin/ppo => ppo/anakin}/rec_ippo.py (99%) rename mava/systems/{anakin/ppo => ppo/anakin}/rec_mappo.py (99%) rename mava/systems/{sebulba/ppo => ppo/sebulba}/ff_ippo.py (100%) rename mava/systems/{anakin => }/ppo/types.py (100%) rename mava/systems/{anakin => }/q_learning/__init__.py (100%) rename mava/systems/{anakin/q_learning => q_learning/anakin}/rec_iql.py (99%) rename mava/systems/{anakin => }/q_learning/types.py (100%) rename mava/systems/{anakin => }/sac/__init__.py (100%) rename mava/systems/{anakin/sac => sac/anakin}/ff_isac.py (99%) rename mava/systems/{anakin/sac => sac/anakin}/ff_masac.py (99%) rename mava/systems/{anakin => }/sac/types.py (100%) diff --git a/mava/systems/anakin/ppo/__init__.py b/mava/systems/ppo/__init__.py similarity index 100% rename from mava/systems/anakin/ppo/__init__.py rename to mava/systems/ppo/__init__.py diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/ppo/anakin/ff_ippo.py similarity index 99% rename from mava/systems/anakin/ppo/ff_ippo.py rename to mava/systems/ppo/anakin/ff_ippo.py index 51efd10e7..44e196535 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/ppo/anakin/ff_ippo.py @@ -32,7 +32,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types 
import LearnerState, OptStates, Params, PPOTransition +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/ppo/anakin/ff_mappo.py similarity index 99% rename from mava/systems/anakin/ppo/ff_mappo.py rename to mava/systems/ppo/anakin/ff_mappo.py index a9364fdfc..7f7dce965 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/ppo/anakin/ff_mappo.py @@ -31,7 +31,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/anakin/ppo/rec_ippo.py b/mava/systems/ppo/anakin/rec_ippo.py similarity index 99% rename from mava/systems/anakin/ppo/rec_ippo.py rename to mava/systems/ppo/anakin/rec_ippo.py index a4d3df428..1f962aa38 100644 --- a/mava/systems/anakin/ppo/rec_ippo.py +++ b/mava/systems/ppo/anakin/rec_ippo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.anakin.ppo.types import ( +from mava.systems.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/ppo/anakin/rec_mappo.py similarity index 99% rename from mava/systems/anakin/ppo/rec_mappo.py rename to mava/systems/ppo/anakin/rec_mappo.py index 93736cf10..0afb3a6c2 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/ppo/anakin/rec_mappo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.anakin.ppo.types import ( +from mava.systems.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py similarity index 100% rename from mava/systems/sebulba/ppo/ff_ippo.py rename to mava/systems/ppo/sebulba/ff_ippo.py diff --git a/mava/systems/anakin/ppo/types.py b/mava/systems/ppo/types.py similarity index 100% rename from mava/systems/anakin/ppo/types.py rename to mava/systems/ppo/types.py diff --git a/mava/systems/anakin/q_learning/__init__.py b/mava/systems/q_learning/__init__.py similarity index 100% rename from mava/systems/anakin/q_learning/__init__.py rename to mava/systems/q_learning/__init__.py diff --git a/mava/systems/anakin/q_learning/rec_iql.py b/mava/systems/q_learning/anakin/rec_iql.py similarity index 99% rename from mava/systems/anakin/q_learning/rec_iql.py rename to mava/systems/q_learning/anakin/rec_iql.py index 89139277a..c4d31aade 100644 --- a/mava/systems/anakin/q_learning/rec_iql.py +++ b/mava/systems/q_learning/anakin/rec_iql.py @@ -34,7 +34,7 @@ from mava.evaluator import make_eval_fns from mava.networks import RecQNetwork, ScannedRNN -from mava.systems.anakin.q_learning.types import ( +from mava.systems.q_learning.types import ( ActionSelectionState, ActionState, LearnerState, diff 
--git a/mava/systems/anakin/q_learning/types.py b/mava/systems/q_learning/types.py similarity index 100% rename from mava/systems/anakin/q_learning/types.py rename to mava/systems/q_learning/types.py diff --git a/mava/systems/anakin/sac/__init__.py b/mava/systems/sac/__init__.py similarity index 100% rename from mava/systems/anakin/sac/__init__.py rename to mava/systems/sac/__init__.py diff --git a/mava/systems/anakin/sac/ff_isac.py b/mava/systems/sac/anakin/ff_isac.py similarity index 99% rename from mava/systems/anakin/sac/ff_isac.py rename to mava/systems/sac/anakin/ff_isac.py index 1642176f3..d6963ab5c 100644 --- a/mava/systems/anakin/sac/ff_isac.py +++ b/mava/systems/sac/anakin/ff_isac.py @@ -34,7 +34,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardQNet as QNetwork -from mava.systems.anakin.sac.types import ( +from mava.systems.sac.types import ( BufferState, LearnerState, Metrics, diff --git a/mava/systems/anakin/sac/ff_masac.py b/mava/systems/sac/anakin/ff_masac.py similarity index 99% rename from mava/systems/anakin/sac/ff_masac.py rename to mava/systems/sac/anakin/ff_masac.py index 2367a67a4..c256018e9 100644 --- a/mava/systems/anakin/sac/ff_masac.py +++ b/mava/systems/sac/anakin/ff_masac.py @@ -34,7 +34,7 @@ from mava.evaluator import make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardQNet as QNetwork -from mava.systems.anakin.sac.types import ( +from mava.systems.sac.types import ( BufferState, LearnerState, Metrics, diff --git a/mava/systems/anakin/sac/types.py b/mava/systems/sac/types.py similarity index 100% rename from mava/systems/anakin/sac/types.py rename to mava/systems/sac/types.py diff --git a/mava/utils/checkpointing.py b/mava/utils/checkpointing.py index 230c4938d..8955f76ce 100644 --- a/mava/utils/checkpointing.py +++ b/mava/utils/checkpointing.py @@ -24,7 +24,7 @@ from jax.tree_util import tree_map from omegaconf import DictConfig, OmegaConf -from mava.systems.anakin.ppo.types import HiddenStates, Params +from mava.systems.ppo.types import HiddenStates, Params from mava.types import MavaState # Keep track of the version of the checkpointer From d1f8364cd3a70cfa7bebdea6709044f1f770fc42 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 14:03:18 +0100 Subject: [PATCH 068/139] update the gym wrappers --- mava/configs/arch/anakin.yaml | 3 +- mava/configs/arch/sebulba.yaml | 8 +- mava/configs/default_ff_ippo.yaml | 2 +- mava/configs/env/lbf_gym.yaml | 19 ++ mava/configs/env/rware_gym.yaml | 19 ++ .../env/scenario/gym-lbf-10x10-3p-3f.yaml | 15 ++ .../env/scenario/gym-lbf-15x15-3p-5f.yaml | 15 ++ .../env/scenario/gym-lbf-15x15-4p-3f.yaml | 15 ++ .../env/scenario/gym-lbf-15x15-4p-5f.yaml | 15 ++ .../env/scenario/gym-lbf-2s-10x10-3p-3f.yaml | 15 ++ .../scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml | 15 ++ .../env/scenario/gym-lbf-8x8-2p-2f-coop.yaml | 15 ++ .../env/scenario/gym-rware-small-4ag.yaml | 18 ++ .../env/scenario/gym-rware-tiny-2ag.yaml | 18 ++ .../env/scenario/gym-rware-tiny-4ag-easy.yaml | 18 ++ .../env/scenario/gym-rware-tiny-4ag.yaml | 18 ++ mava/configs/system/ppo/ff_ippo.yaml | 6 +- mava/utils/logger.py | 5 +- mava/utils/make_env.py | 45 ++-- mava/wrappers/__init__.py | 4 +- mava/wrappers/gym.py | 242 ++++++++---------- requirements/requirements.txt | 4 +- 22 files changed, 362 insertions(+), 172 deletions(-) create mode 100644 mava/configs/env/lbf_gym.yaml create mode 100644 mava/configs/env/rware_gym.yaml 
create mode 100644 mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml create mode 100644 mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml create mode 100644 mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml create mode 100644 mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml create mode 100644 mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml create mode 100644 mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml create mode 100644 mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml create mode 100644 mava/configs/env/scenario/gym-rware-small-4ag.yaml create mode 100644 mava/configs/env/scenario/gym-rware-tiny-2ag.yaml create mode 100644 mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml create mode 100644 mava/configs/env/scenario/gym-rware-tiny-4ag.yaml diff --git a/mava/configs/arch/anakin.yaml b/mava/configs/arch/anakin.yaml index d58d85286..eb948b7a1 100644 --- a/mava/configs/arch/anakin.yaml +++ b/mava/configs/arch/anakin.yaml @@ -1,5 +1,6 @@ # --- Anakin config --- -arch_name: "Anakin" +architecture_name: anakin + # --- Training --- num_envs: 16 # Number of vectorised environments per device. diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index e0305e2dc..0b539059b 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -1,6 +1,8 @@ # --- Sebulba config --- -arch_name: "Sebulba" -num_envs: 32 # number of envs per thread +architecture_name: sebulba + +# --- Training --- +num_envs: 32 # number of environments per thread. # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select @@ -12,6 +14,6 @@ absolute_metric: True # Whether the absolute metric should be computed. For more # on the absolute metric please see: https://arxiv.org/abs/2209.10485 # --- Sebulba devices config --- -n_threads_per_executor: 2 # num of different threads/env batches per actor +n_threads_per_executor: 1 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices diff --git a/mava/configs/default_ff_ippo.yaml b/mava/configs/default_ff_ippo.yaml index d942584ce..c4aa6ea49 100644 --- a/mava/configs/default_ff_ippo.yaml +++ b/mava/configs/default_ff_ippo.yaml @@ -3,5 +3,5 @@ defaults: - arch: anakin - system: ppo/ff_ippo - network: mlp - - env: rware + - env: rware_gym - _self_ diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml new file mode 100644 index 000000000..6981f3492 --- /dev/null +++ b/mava/configs/env/lbf_gym.yaml @@ -0,0 +1,19 @@ +# ---Environment Configs--- +defaults: + - _self_ + - scenario: gym-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] + +env_name: LevelBasedForaging # Used for logging purposes. + +# Defines the metric that will be used to evaluate the performance of the agent. +# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. +eval_metric: episode_return + +# Whether the add agents IDs to the observations returned by the environment. +add_agent_id : False + +# Whether or not to log the winrate of this environment. +log_win_rate: False + +# Weather or not to sum the returned rewards over all of the agents. 
+use_shared_rewards: True diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml new file mode 100644 index 000000000..87bd3a473 --- /dev/null +++ b/mava/configs/env/rware_gym.yaml @@ -0,0 +1,19 @@ +# ---Environment Configs--- +defaults: + - _self_ + - scenario: gym-rware-tiny-2ag # [gym-rware-tiny-2ag, gym-rware-tiny-4ag, gym-rware-tiny-4ag-easy, gym-rware-small-4ag] + +env_name: RobotWarehouse # Used for logging purposes. + +# Defines the metric that will be used to evaluate the performance of the agent. +# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. +eval_metric: episode_return + +# Whether the add agents IDs to the observations returned by the environment. +add_agent_id : False + +# Whether or not to log the winrate of this environment. +log_win_rate: False + +# Weather or not to sum the returned rewards over all of the agents. +use_shared_rewards: True diff --git a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml new file mode 100644 index 000000000..386431be4 --- /dev/null +++ b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml @@ -0,0 +1,15 @@ +# The config of the 10x10-3p-3f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 10x10-3p-3f + +task_config: + field_size: [10,10] + sight: 10 + num_agents: 3 + max_food: 3 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml new file mode 100644 index 000000000..1a8380511 --- /dev/null +++ b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml @@ -0,0 +1,15 @@ +# The config of the 15x15-3p-5f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 15x15-3p-5f + +task_config: + field_size: [15, 15] + sight: 15 + num_agents: 3 + max_food: 5 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml new file mode 100644 index 000000000..fa22f737b --- /dev/null +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml @@ -0,0 +1,15 @@ +# The config of the 15x15-4p-3f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 15x15-4p-3f + +task_config: + field_size: [15, 15] + sight: 15 + num_agents: 4 + max_food: 3 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml new file mode 100644 index 000000000..28937215c --- /dev/null +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml @@ -0,0 +1,15 @@ +# The config of the 15x15-4p-5f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 15x15-4p-5f + +task_config: + field_size: [15, 15] + sight: 15 + num_agents: 4 + max_food: 5 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml new file mode 100644 index 000000000..f0262eb8d --- 
/dev/null +++ b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml @@ -0,0 +1,15 @@ +# The config of the 2s10x10-3p-3f scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 2s-10x10-3p-3f + +task_config: + field_size: [10, 10] + sight: 2 + num_agents: 3 + max_food: 3 + max_player_level: 2 + force_coop: False + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml new file mode 100644 index 000000000..ffdc5be0e --- /dev/null +++ b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml @@ -0,0 +1,15 @@ +# The config of the 2s-8x8-2p-2f-coop scenario with the VectorObserver set as default. +name: LevelBasedForaging +task_name: 2s-8x8-2p-2f-coop + +task_config: + field_size: [8, 8] # size of the grid to generate. + sight: 2 # field of view of an agent. + num_agents: 2 # number of agents on the grid. + max_food: 2 # number of food in the environment. + max_player_level: 2 # maximum level of the agents (inclusive). + force_coop: True # force cooperation between agents. + max_episode_steps: 50 # max number of steps per episode. + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml new file mode 100644 index 000000000..52519fecb --- /dev/null +++ b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml @@ -0,0 +1,15 @@ +# The config of the 8x8-2p-2f-coop scenario with the VectorObserver set as default +name: LevelBasedForaging +task_name: 8x8-2p-2f-coop + +task_config: + field_size: [8, 8] + sight: 8 + num_agents: 2 + max_food: 2 + max_player_level: 2 + force_coop: True + max_episode_steps: 50 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-small-4ag.yaml b/mava/configs/env/scenario/gym-rware-small-4ag.yaml new file mode 100644 index 000000000..39f8efa4e --- /dev/null +++ b/mava/configs/env/scenario/gym-rware-small-4ag.yaml @@ -0,0 +1,18 @@ +# The config of the small-4ag environment +name: RobotWarehouse +task_name: small-4ag + +task_config: + column_height: 8 + shelf_rows: 2 + shelf_columns: 3 + n_agents: 4 + sensor_range: 1 + request_queue_size: 4 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml new file mode 100644 index 000000000..95ef11fc2 --- /dev/null +++ b/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml @@ -0,0 +1,18 @@ +# The config of the tiny-2ag environment +name: RobotWarehouse +task_name: tiny-2ag + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + n_agents: 2 + sensor_range: 1 + request_queue_size: 2 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml new file mode 100644 index 000000000..7753b73ec --- /dev/null +++ b/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml @@ -0,0 +1,18 @@ +# The config of the tiny-4ag-easy environment +name: RobotWarehouse +task_name: tiny-4ag-easy + 
+task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + n_agents: 4 + sensor_range: 1 + request_queue_size: 8 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml new file mode 100644 index 000000000..c28cf92c5 --- /dev/null +++ b/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml @@ -0,0 +1,18 @@ +# The config of the tiny_4ag environment +name: RobotWarehouse +task_name: tiny-4ag + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + n_agents: 4 + sensor_range: 1 + request_queue_size: 4 + msg_bits : 0 + max_inactivity_steps : null + max_steps : 500 + reward_type : 0 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/system/ppo/ff_ippo.yaml b/mava/configs/system/ppo/ff_ippo.yaml index c80b43ec8..9efb0611a 100644 --- a/mava/configs/system/ppo/ff_ippo.yaml +++ b/mava/configs/system/ppo/ff_ippo.yaml @@ -9,12 +9,12 @@ seed: 42 add_agent_id: True # --- RL hyperparameters --- -actor_lr: 0.0005 # Learning rate for actor network -critic_lr: 0.0005 # Learning rate for critic network +actor_lr: 2.5e-4 # Learning rate for actor network +critic_lr: 2.5e-4 # Learning rate for critic network update_batch_size: 2 # Number of vectorised gradient updates per device. rollout_length: 128 # Number of environment steps per vectorised environment. ppo_epochs: 4 # Number of ppo epochs per training data batch. -num_minibatches: 1 # Number of minibatches per ppo epoch. +num_minibatches: 2 # Number of minibatches per ppo epoch. gamma: 0.99 # Discounting factor. gae_lambda: 0.95 # Lambda value for GAE computation. clip_eps: 0.2 # Clipping value for PPO updates and value function. 
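For context on the learning-rate and minibatch changes above: combining the rollout_length, ppo_epochs and num_minibatches from this config with the num_envs values from the arch configs gives the per-update batch arithmetic sketched below. This is a rough illustration using the values in these configs, not an excerpt from Mava's learner code.

# Rough batch arithmetic for the hyperparameters above (illustrative only).
rollout_length = 128   # environment steps per vectorised environment per update
num_envs = 16          # anakin default; sebulba collects 32 per executor thread
ppo_epochs = 4
num_minibatches = 2    # changed from 1 in this patch

batch_size = rollout_length * num_envs                 # 2048 transitions per update
minibatch_size = batch_size // num_minibatches         # 1024 transitions per gradient step
gradient_steps = ppo_epochs * num_minibatches          # 8 gradient steps per update
print(batch_size, minibatch_size, gradient_steps)
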
diff --git a/mava/utils/logger.py b/mava/utils/logger.py index dc217f263..4edad361e 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -150,9 +150,8 @@ class NeptuneLogger(BaseLogger): def __init__(self, cfg: DictConfig, unique_token: str) -> None: tags = list(cfg.logger.kwargs.neptune_tag) project = cfg.logger.kwargs.neptune_project - mode = "sync" if cfg.arch.arch_name == "Sebulba" else "async" - self.logger = neptune.init_run(project=project, tags=tags, mode=mode) + self.logger = neptune.init_run(project=project, tags=tags) self.logger["config"] = stringify_unsupported(cfg) self.detailed_logging = cfg.logger.kwargs.detailed_neptune_logging @@ -338,7 +337,7 @@ def get_logger_path(config: DictConfig, logger_type: str) -> str: def describe(x: ArrayLike) -> Union[Dict[str, ArrayLike], ArrayLike]: """Generate summary statistics for an array of metrics (mean, std, min, max).""" - if not (isinstance(x, jax.Array) or isinstance(x, np.ndarray)) or x.size <= 1: + if not isinstance(x, jax.Array) or x.size <= 1: return x # np instead of jnp because we don't jit here diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 9828573e0..e49d6344b 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -14,10 +14,10 @@ from typing import Tuple -import gym -import gym.vector -import gym.wrappers -import gym.wrappers.compatibility +import gymnasium +import gymnasium.vector +import gymnasium.wrappers +import gymnasium.wrappers.compatibility import jaxmarl import jumanji import matrax @@ -36,7 +36,9 @@ from jumanji.environments.routing.robot_warehouse.generator import ( RandomGenerator as RwareRandomGenerator, ) +from lbforaging.foraging import environment as gym_lbf from omegaconf import DictConfig +from rware.warehouse import Warehouse as gym_Warehouse from mava.wrappers import ( AgentIDWrapper, @@ -47,14 +49,14 @@ GymAgentIDWrapper, GymLBFWrapper, GymRecordEpisodeMetrics, - GymRwareWrapper, + GymWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, - _multiagent_worker_shared_memory, + async_multiagent_worker, ) # Registry mapping environment names to their generator and wrapper classes. @@ -72,7 +74,10 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} -_gym_registry = {"RobotWarehouse": GymRwareWrapper, "LevelBasedForaging": GymLBFWrapper} +_gym_registry = { + "RobotWarehouse": (gym_Warehouse, GymWrapper), + "LevelBasedForaging": (gym_lbf, GymLBFWrapper), +} def add_extra_wrappers( @@ -214,9 +219,9 @@ def make_gym_env( config: DictConfig, num_env: int, add_global_state: bool = False, -) -> Environment: # todo : create the appropriate annotation for the sync vector +) -> gymnasium.vector.AsyncVectorEnv: """ - Create a Gym environment. + Create a gymnasium environment. Args: config (Dict): The configuration of the environment. @@ -226,22 +231,20 @@ def make_gym_env( Returns: Async environments. """ - base_env_name = config.env.env_name - wrapper = _gym_registry[base_env_name] - - def create_gym_env( - config: DictConfig, add_global_state: bool = False - ) -> Environment: # todo: add the RecordEpisodeMetrics for gym. - env = gym.make(config.env.scenario) - wrapped_env = wrapper(env, config.env.use_individual_rewards, add_global_state) - if not config.env.implicit_agent_id: - wrapped_env = GymAgentIDWrapper(wrapped_env) # todo : add agent id wrapper for gym . 
+ base_env_name = config.env.scenario.name + env_maker, wrapper = _gym_registry[base_env_name] + + def create_gym_env(config: DictConfig, add_global_state: bool = False) -> Environment: + env = env_maker(**config.env.scenario.task_config) + wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) + if config.env.add_agent_id: + wrapped_env = GymAgentIDWrapper(wrapped_env) wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env - envs = gym.vector.AsyncVectorEnv( # todo : give them more descriptive names + envs = gymnasium.vector.AsyncVectorEnv( [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], - worker=_multiagent_worker_shared_memory, + worker=async_multiagent_worker, ) return envs diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 869e78053..80cbccc52 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -19,8 +19,8 @@ GymAgentIDWrapper, GymLBFWrapper, GymRecordEpisodeMetrics, - GymRwareWrapper, - _multiagent_worker_shared_memory, + GymWrapper, + async_multiagent_worker, ) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper from mava.wrappers.jumanji import ( diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index a9bc5af8e..18d3ede73 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -13,46 +13,50 @@ # limitations under the License. import sys +import traceback import warnings -from typing import Any, Callable, Dict, Optional, Tuple +from multiprocessing import Queue +from multiprocessing.connection import Connection +from typing import Any, Callable, Dict, Optional, Tuple, Union -import gym +import gymnasium import numpy as np -from gym import spaces -from gym.vector.utils import write_to_shared_memory +from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray # Filter out the warnings -warnings.filterwarnings("ignore", module="gym.utils.passive_env_checker") +warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") -class GymRwareWrapper(gym.Wrapper): - """Wrapper for rware gym environments.""" +class GymWrapper(gymnasium.Wrapper): + """Base wrapper for multi-agent gym environments. + This wrapper works out of the box for RobotWarehouse. + See `GymLBFWrapper` for how it can be modified to work for other environments. + """ def __init__( self, - env: gym.Env, - use_individual_rewards: bool = False, + env: gymnasium.Env, + use_shared_rewards: bool = True, add_global_state: bool = False, ): - """Initialize the gym wrapper + """Initialise the gym wrapper Args: - env (gym.env): gym env instance. - use_individual_rewards (bool, optional): Use individual or group rewards. + env (gymnasium.env): gymnasium env instance. + use_shared_rewards (bool, optional): Use individual or shared rewards. Defaults to False. add_global_state (bool, optional) : Create global observations. Defaults to False. 
""" super().__init__(env) self._env = env - self.use_individual_rewards = use_individual_rewards + self.use_shared_rewards = use_shared_rewards self.add_global_state = add_global_state self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[0].n def reset( self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple[np.ndarray, Dict]: - + ) -> Tuple[NDArray, Dict]: if seed is not None: self.env.seed(seed) @@ -64,18 +68,17 @@ def reset( return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple: - + def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) info = {"actions_mask": self.get_actions_mask(info)} if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) - if self.use_individual_rewards: - reward = np.array(reward) + if self.use_shared_rewards: + reward = np.array([np.array(reward).sum()] * self.num_agents) else: - reward = np.array([np.array(reward).mean()] * self.num_agents) + reward = np.array(reward) return agents_view, reward, terminated, truncated, info @@ -89,68 +92,22 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) -class GymLBFWrapper(gym.Wrapper): - """Wrapper for rware gym environments""" +class GymLBFWrapper(GymWrapper): + """Wrapper for the gym level based foraging environment.""" - def __init__( - self, - env: gym.Env, - use_individual_rewards: bool = False, - add_global_state: bool = False, - ): - """Initialize the gym wrapper - Args: - env (gym.env): gym env instance. - use_individual_rewards (bool, optional): Use individual or group rewards. - Defaults to False. - add_global_state (bool, optional) : Create global observations. Defaults to False. - """ - super().__init__(env) - self._env = env # not having _env leaded tp self.env getting replaced --> circular called - self.use_individual_rewards = use_individual_rewards - self.add_global_state = add_global_state # todo : add the global observations - self.num_agents = len(self._env.action_space) - self.num_actions = self._env.action_space[ - 0 - ].n # todo: all the agents must have the same num_actions, add assertion? 
- - def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> Tuple: - - if seed is not None: - self.env.seed(seed) - - agents_view, info = self._env.reset() + def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: + agents_view, reward, terminated, truncated, info = super().step(actions) - info = {"actions_mask": self.get_actions_mask(info)} - - return np.array(agents_view), info - - def step(self, actions: NDArray) -> Tuple: # Vect auto rest - - agents_view, reward, terminated, truncated, info = self._env.step(actions) - - info = {"actions_mask": self.get_actions_mask(info)} - - if self.use_individual_rewards: - reward = np.array(reward) - else: - reward = np.array([np.array(reward).sum()] * self.num_agents) - - truncated = [truncated] * self.num_agents - terminated = [terminated] * self.num_agents + truncated = np.repeat(truncated, self.num_agents) + terminated = np.repeat(terminated, self.num_agents) return agents_view, reward, terminated, truncated, info - def get_actions_mask(self, info: Dict) -> NDArray: - if "action_mask" in info: - return np.array(info["action_mask"]) - return np.ones((self.num_agents, self.num_actions), dtype=np.float32) - -class GymRecordEpisodeMetrics(gym.Wrapper): +class GymRecordEpisodeMetrics(gymnasium.Wrapper): """Record the episode returns and lengths.""" - def __init__(self, env: gym.Env): + def __init__(self, env: gymnasium.Env): super().__init__(env) self._env = env self.running_count_episode_return = 0.0 @@ -158,9 +115,7 @@ def __init__(self, env: gym.Env): def reset( self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple[np.ndarray, Dict]: - - # Reset the env + ) -> Tuple[NDArray, Dict]: agents_view, info = self._env.reset(seed, options) # Create the metrics dict @@ -181,9 +136,7 @@ def reset( return agents_view, info - def step(self, actions: NDArray) -> Tuple: - - # Step the env + def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) self.running_count_episode_return += float(np.mean(reward)) @@ -202,111 +155,126 @@ def step(self, actions: NDArray) -> Tuple: return agents_view, reward, terminated, truncated, info -class GymAgentIDWrapper(gym.Wrapper): +class GymAgentIDWrapper(gymnasium.Wrapper): """Add one hot agent IDs to observation.""" - def __init__(self, env: gym.Env): + def __init__(self, env: gymnasium.Env): super().__init__(env) self.agent_ids = np.eye(self.env.num_agents) - observation_space = self.env.observation_space[0] - _obs_low, _obs_high, _obs_dtype, _obs_shape = ( - observation_space.low[0], - observation_space.high[0], - observation_space.dtype, - observation_space.shape, - ) - _new_obs_shape = (_obs_shape[0] + self.env.num_agents,) - _observation_boxs = [ - spaces.Box(low=_obs_low, high=_obs_high, shape=_new_obs_shape, dtype=_obs_dtype) - ] * self.env.num_agents - self.observation_space = spaces.Tuple(_observation_boxs) + self.observation_space = self.modify_space(self.env.observation_space) def reset( self, seed: Optional[int] = None, options: Optional[dict] = None - ) -> Tuple[np.ndarray, Dict]: + ) -> Tuple[NDArray, Dict]: """Reset the environment.""" obs, info = self.env.reset(seed, options) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, info - def step(self, action: list) -> Tuple[np.ndarray, float, bool, bool, Dict]: + def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]: """Step the environment.""" obs, reward, 
terminated, truncated, info = self.env.step(action) obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, reward, terminated, truncated, info + def modify_space(self, space: gymnasium.spaces) -> gymnasium.spaces: + if isinstance(space, gymnasium.spaces.Box): + new_shape = space.shape[0] + len(self.agent_ids) + return gymnasium.spaces.Box( + low=space.low, high=space.high, shape=new_shape, dtype=space.dtype + ) + elif isinstance(space, gymnasium.spaces.Tuple): + return gymnasium.spaces.Tuple(self.modify_space(s) for s in space) + else: + raise ValueError(f"Space {type(space)} is not currently supported.") + -# Copied form https://github.com/openai/gym/blob/master/gym/vector/async_vector_env.py +# Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents -def _multiagent_worker_shared_memory( # noqa: CCR001 +def async_multiagent_worker( # noqa CCR001 index: int, - env_fn: Callable[[], Any], - pipe: Any, - parent_pipe: Any, - shared_memory: Any, - error_queue: Any, + env_fn: Callable, + pipe: Connection, + parent_pipe: Connection, + shared_memory: Union[NDArray, dict[str, Any], tuple[Any, ...]], + error_queue: Queue, ) -> None: - assert shared_memory is not None env = env_fn() observation_space = env.observation_space + action_space = env.action_space + autoreset = False + parent_pipe.close() + try: while True: command, data = pipe.recv() + if command == "reset": observation, info = env.reset(**data) - write_to_shared_memory(observation_space, index, observation, shared_memory) - pipe.send(((None, info), True)) - + if shared_memory: + write_to_shared_memory(observation_space, index, observation, shared_memory) + observation = None + autoreset = False + pipe.send(((observation, info), True)) elif command == "step": - ( - observation, - reward, - terminated, - truncated, - info, - ) = env.step(data) - # Handel the dones across all of envs and agents - if np.logical_or(terminated, truncated).all(): - old_observation, old_info = observation, info + if autoreset: observation, info = env.reset() - info["final_observation"] = old_observation - info["final_info"] = old_info - write_to_shared_memory(observation_space, index, observation, shared_memory) - pipe.send(((None, reward, terminated, truncated, info), True)) - elif command == "seed": - env.seed(data) - pipe.send((None, True)) + reward, terminated, truncated = 0, False, False + else: + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + # The autoreset was modified to work with boolean arrays. + autoreset = np.logical_or(terminated, truncated).all() + + if shared_memory: + write_to_shared_memory(observation_space, index, observation, shared_memory) + observation = None + + pipe.send(((observation, reward, terminated, truncated, info), True)) elif command == "close": pipe.send((None, True)) break elif command == "_call": name, args, kwargs = data - if name in ["reset", "step", "seed", "close"]: + if name in ["reset", "step", "close", "_setattr", "_check_spaces"]: raise ValueError( - f"Trying to call function `{name}` with " - f"`_call`. Use `{name}` directly instead." + f"Trying to call function `{name}` with \ + `call`, use `{name}` directly instead." 
) - function = getattr(env, name) - if callable(function): - pipe.send((function(*args, **kwargs), True)) + + attr = env.get_wrapper_attr(name) + if callable(attr): + pipe.send((attr(*args, **kwargs), True)) else: - pipe.send((function, True)) + pipe.send((attr, True)) elif command == "_setattr": name, value = data - setattr(env, name, value) + env.set_wrapper_attr(name, value) pipe.send((None, True)) elif command == "_check_spaces": - pipe.send(((data[0] == observation_space, data[1] == env.action_space), True)) + pipe.send( + ( + (data[0] == observation_space, data[1] == action_space), + True, + ) + ) else: raise RuntimeError( - f"Received unknown command `{command}`. Must " - "be one of {`reset`, `step`, `seed`, `close`, `_call`, " - "`_setattr`, `_check_spaces`}." + f"Received unknown command `{command}`. Must be one of \ + [`reset`, `step`, `close`, `_call`, `_setattr`, `_check_spaces`]." ) except (KeyboardInterrupt, Exception): - error_queue.put((index,) + sys.exc_info()[:2]) + error_type, error_message, _ = sys.exc_info() + trace = traceback.format_exc() + + error_queue.put((index, error_type, error_message, trace)) pipe.send((None, False)) finally: env.close() diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 3b3bc4c58..0c68a3ca5 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -3,12 +3,14 @@ distrax @ git+https://github.com/google-deepmind/distrax # distrax release does flashbax~=0.1.0 flax gigastep @ git+https://github.com/mlech26l/gigastep +gymnasium hydra-core==1.3.2 id-marl-eval @ git+https://github.com/instadeepai/marl-eval jax jaxlib jaxmarl jumanji @ git+https://github.com/sash-a/jumanji +lbforaging @ git+https://github.com/LukasSchaefer/lb-foraging.git@gymnasium_integration # fixes: https://github.com/semitable/lb-foraging/issues/20 matrax @ git+https://github.com/instadeepai/matrax mujoco==3.1.3 mujoco-mjx==3.1.3 @@ -17,7 +19,7 @@ numpy omegaconf optax protobuf~=3.20 -rware @ git+https://github.com/RuanJohn/robotic-warehouse.git +rware @ git+https://github.com/RuanJohn/robotic-warehouse.git # compatibility with latest gymnasium scipy==1.12.0 tensorboard_logger tensorflow_probability From dc641c6a6f2f16042304de47e00ba8523b7ce59b Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 14:48:56 +0100 Subject: [PATCH 069/139] folder re-structuring --- mava/configs/default_ff_ippo_seb.yaml | 2 +- mava/configs/env/gym.yaml | 21 ------------------- mava/systems/anakin/sac/__init__.py | 13 ------------ mava/systems/{anakin => ppo}/__init__.py | 0 .../{anakin/ppo => ppo/anakin}/ff_ippo.py | 2 +- .../{anakin/ppo => ppo/anakin}/ff_mappo.py | 2 +- .../{anakin/ppo => ppo/anakin}/rec_ippo.py | 2 +- .../{anakin/ppo => ppo/anakin}/rec_mappo.py | 2 +- .../{sebulba/ppo => ppo/sebulba}/ff_ippo.py | 4 ++-- mava/systems/{anakin => }/ppo/types.py | 0 .../{anakin/ppo => q_learning}/__init__.py | 0 .../anakin}/rec_iql.py | 0 mava/systems/{anakin => }/q_learning/types.py | 0 .../{anakin/q_learning => sac}/__init__.py | 0 .../{anakin/sac => sac/anakin}/ff_isac.py | 0 .../{anakin/sac => sac/anakin}/ff_masac.py | 0 mava/systems/{anakin => }/sac/types.py | 0 mava/utils/checkpointing.py | 2 +- 18 files changed, 8 insertions(+), 42 deletions(-) delete mode 100644 mava/configs/env/gym.yaml delete mode 100644 mava/systems/anakin/sac/__init__.py rename mava/systems/{anakin => ppo}/__init__.py (100%) rename mava/systems/{anakin/ppo => ppo/anakin}/ff_ippo.py (99%) rename mava/systems/{anakin/ppo => ppo/anakin}/ff_mappo.py (99%) 
rename mava/systems/{anakin/ppo => ppo/anakin}/rec_ippo.py (99%) rename mava/systems/{anakin/ppo => ppo/anakin}/rec_mappo.py (99%) rename mava/systems/{sebulba/ppo => ppo/sebulba}/ff_ippo.py (99%) rename mava/systems/{anakin => }/ppo/types.py (100%) rename mava/systems/{anakin/ppo => q_learning}/__init__.py (100%) rename mava/systems/{anakin/q_learning => q_learning/anakin}/rec_iql.py (100%) rename mava/systems/{anakin => }/q_learning/types.py (100%) rename mava/systems/{anakin/q_learning => sac}/__init__.py (100%) rename mava/systems/{anakin/sac => sac/anakin}/ff_isac.py (100%) rename mava/systems/{anakin/sac => sac/anakin}/ff_masac.py (100%) rename mava/systems/{anakin => }/sac/types.py (100%) diff --git a/mava/configs/default_ff_ippo_seb.yaml b/mava/configs/default_ff_ippo_seb.yaml index 1002d90c4..204719232 100644 --- a/mava/configs/default_ff_ippo_seb.yaml +++ b/mava/configs/default_ff_ippo_seb.yaml @@ -3,5 +3,5 @@ defaults: - arch: sebulba - system: ppo/ff_ippo - network: mlp - - env: gym + - env: rware_gym - _self_ diff --git a/mava/configs/env/gym.yaml b/mava/configs/env/gym.yaml deleted file mode 100644 index 9ddd16d41..000000000 --- a/mava/configs/env/gym.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ---Environment Configs--- - -scenario: rware:rware-tiny-4ag-v1 #Foraging-8x8-2p-1f-v2 #rware:rware-tiny-2ag-v1 # [tiny-2ag, tiny-4ag, tiny-4ag-easy, small-4ag] - -env_name: RobotWarehouse #LevelBasedForaging # Used for logging purposes. - -# Defines the metric that will be used to evaluate the performance of the agent. -# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. -eval_metric: episode_return - -# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. -# This should not be changed. -implicit_agent_id: False -# Whether or not to log the winrate of this environment. This should not be changed as not all -# environments have a winrate metric. -log_win_rate: False - -use_individual_rewards: True - -kwargs: - time_limit: 500 diff --git a/mava/systems/anakin/sac/__init__.py b/mava/systems/anakin/sac/__init__.py deleted file mode 100644 index 21db9ec1c..000000000 --- a/mava/systems/anakin/sac/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2022 InstaDeep Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
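The renames in this patch flip the systems layout from architecture-first to algorithm-first, so shared algorithm types now sit next to their architecture-specific entry points. The sketch below shows the resulting import path; only the mava.systems.ppo.types import is taken verbatim from this diff, and the commented tree is inferred from the rename list.

# Layout after this patch (inferred from the renames above):
#   mava/systems/ppo/        -> types.py, anakin/{ff_ippo, ff_mappo, rec_ippo, rec_mappo}.py, sebulba/ff_ippo.py
#   mava/systems/q_learning/ -> types.py, anakin/rec_iql.py
#   mava/systems/sac/        -> types.py, anakin/{ff_isac, ff_masac}.py
# Algorithm-level types are imported independently of the architecture, e.g.:
from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition
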
diff --git a/mava/systems/anakin/__init__.py b/mava/systems/ppo/__init__.py similarity index 100% rename from mava/systems/anakin/__init__.py rename to mava/systems/ppo/__init__.py diff --git a/mava/systems/anakin/ppo/ff_ippo.py b/mava/systems/ppo/anakin/ff_ippo.py similarity index 99% rename from mava/systems/anakin/ppo/ff_ippo.py rename to mava/systems/ppo/anakin/ff_ippo.py index 408bdf36d..7c93f887d 100644 --- a/mava/systems/anakin/ppo/ff_ippo.py +++ b/mava/systems/ppo/anakin/ff_ippo.py @@ -32,7 +32,7 @@ from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/anakin/ppo/ff_mappo.py b/mava/systems/ppo/anakin/ff_mappo.py similarity index 99% rename from mava/systems/anakin/ppo/ff_mappo.py rename to mava/systems/ppo/anakin/ff_mappo.py index 93d3f2c0b..17a5cbfcf 100644 --- a/mava/systems/anakin/ppo/ff_mappo.py +++ b/mava/systems/ppo/anakin/ff_mappo.py @@ -31,7 +31,7 @@ from mava.evaluator import make_anakin_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer diff --git a/mava/systems/anakin/ppo/rec_ippo.py b/mava/systems/ppo/anakin/rec_ippo.py similarity index 99% rename from mava/systems/anakin/ppo/rec_ippo.py rename to mava/systems/ppo/anakin/rec_ippo.py index 583cd7acc..75f751dd1 100644 --- a/mava/systems/anakin/ppo/rec_ippo.py +++ b/mava/systems/ppo/anakin/rec_ippo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.anakin.ppo.types import ( +from mava.systems.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/anakin/ppo/rec_mappo.py b/mava/systems/ppo/anakin/rec_mappo.py similarity index 99% rename from mava/systems/anakin/ppo/rec_mappo.py rename to mava/systems/ppo/anakin/rec_mappo.py index 74179ab34..3534b96b8 100644 --- a/mava/systems/anakin/ppo/rec_mappo.py +++ b/mava/systems/ppo/anakin/rec_mappo.py @@ -33,7 +33,7 @@ from mava.networks import RecurrentActor as Actor from mava.networks import RecurrentValueNet as Critic from mava.networks import ScannedRNN -from mava.systems.anakin.ppo.types import ( +from mava.systems.ppo.types import ( HiddenStates, OptStates, Params, diff --git a/mava/systems/sebulba/ppo/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py similarity index 99% rename from mava/systems/sebulba/ppo/ff_ippo.py rename to mava/systems/ppo/sebulba/ff_ippo.py index 42d2732ae..316ef0533 100644 --- a/mava/systems/sebulba/ppo/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -36,7 +36,7 @@ from mava.evaluator import make_sebulba_eval_fns as make_eval_fns from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet 
as Critic -from mava.systems.anakin.ppo.types import LearnerState, OptStates, Params, PPOTransition +from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ( ActorApply, CriticApply, @@ -479,7 +479,7 @@ def learner_setup( # Get number of agents and actions. action_space = env.single_action_space config.system.num_agents = len(action_space) - config.system.num_actions = action_space[0].n + config.system.num_actions = int(action_space[0].n) # PRNG keys. key, actor_net_key, critic_net_key = keys diff --git a/mava/systems/anakin/ppo/types.py b/mava/systems/ppo/types.py similarity index 100% rename from mava/systems/anakin/ppo/types.py rename to mava/systems/ppo/types.py diff --git a/mava/systems/anakin/ppo/__init__.py b/mava/systems/q_learning/__init__.py similarity index 100% rename from mava/systems/anakin/ppo/__init__.py rename to mava/systems/q_learning/__init__.py diff --git a/mava/systems/anakin/q_learning/rec_iql.py b/mava/systems/q_learning/anakin/rec_iql.py similarity index 100% rename from mava/systems/anakin/q_learning/rec_iql.py rename to mava/systems/q_learning/anakin/rec_iql.py diff --git a/mava/systems/anakin/q_learning/types.py b/mava/systems/q_learning/types.py similarity index 100% rename from mava/systems/anakin/q_learning/types.py rename to mava/systems/q_learning/types.py diff --git a/mava/systems/anakin/q_learning/__init__.py b/mava/systems/sac/__init__.py similarity index 100% rename from mava/systems/anakin/q_learning/__init__.py rename to mava/systems/sac/__init__.py diff --git a/mava/systems/anakin/sac/ff_isac.py b/mava/systems/sac/anakin/ff_isac.py similarity index 100% rename from mava/systems/anakin/sac/ff_isac.py rename to mava/systems/sac/anakin/ff_isac.py diff --git a/mava/systems/anakin/sac/ff_masac.py b/mava/systems/sac/anakin/ff_masac.py similarity index 100% rename from mava/systems/anakin/sac/ff_masac.py rename to mava/systems/sac/anakin/ff_masac.py diff --git a/mava/systems/anakin/sac/types.py b/mava/systems/sac/types.py similarity index 100% rename from mava/systems/anakin/sac/types.py rename to mava/systems/sac/types.py diff --git a/mava/utils/checkpointing.py b/mava/utils/checkpointing.py index 230c4938d..8955f76ce 100644 --- a/mava/utils/checkpointing.py +++ b/mava/utils/checkpointing.py @@ -24,7 +24,7 @@ from jax.tree_util import tree_map from omegaconf import DictConfig, OmegaConf -from mava.systems.anakin.ppo.types import HiddenStates, Params +from mava.systems.ppo.types import HiddenStates, Params from mava.types import MavaState # Keep track of the version of the checkpointer From 0881d2f1ae12ee3e686dbdf7e53ed7d1cc209ce8 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 16:51:02 +0100 Subject: [PATCH 070/139] fix: removed deprecated jax call --- mava/systems/ppo/sebulba/ff_ippo.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 316ef0533..288249af5 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -148,7 +148,7 @@ def get_action_and_value( # Prepare the data storage_time_start = time.time() next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) # Stack the metrics + metrics = jax.tree_util.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) # Stack the metrics # Append data to storage storage.append( @@ -170,11 +170,11 @@ def get_action_and_value( # Prepare 
data to share with learner # [PPOTransition() * rollout_len] --> PPOTransition[done=(rollout_len, num_envs, num_agents) # , action=(rollout_len, num_envs, num_agents, num_actions), ...] - stacked_storage = jax.tree_map(lambda *xs: jnp.stack(xs), *storage) + stacked_storage = jax.tree_util.tree_map(lambda *xs: jnp.stack(xs), *storage) # Split the arrays over the different learner_devices on the num_envs axis - sharded_storage = jax.tree_map( + sharded_storage = jax.tree_util.tree_map( lambda x: shard_split_payload(x, 1), stacked_storage ) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) @@ -700,10 +700,10 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 rollout_times.append(time.time() - rollout_start_time) # Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_map( + sharded_storages = jax.tree_util.tree_map( lambda *x: jnp.concatenate(x, axis=2), *sharded_storages ) - sharded_next_obss = jax.tree_map( + sharded_next_obss = jax.tree_util.tree_map( lambda *x: jnp.concatenate(x, axis=1), *sharded_next_obss ) sharded_next_dones = jnp.concatenate(sharded_next_dones, axis=1) @@ -730,7 +730,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 # Log the results of the training. elapsed_time = time.time() - training_start_time t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics = jax.tree_map(lambda *x: np.asarray(x), *episode_metrics) + episode_metrics = jax.tree_util.tree_map(lambda *x: np.asarray(x), *episode_metrics) episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time @@ -744,7 +744,7 @@ def run_experiment(_config: DictConfig) -> float: # noqa: CCR001 logger.log(speed_info, t, eval_step, LogEvent.MISC) if ep_completed: # only log episode metrics if an episode was completed in the rollout. 
logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - train_metrics = jax.tree_map(lambda *x: np.asarray(x), *train_metrics) + train_metrics = jax.tree_util.tree_map(lambda *x: np.asarray(x), *train_metrics) logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) # Evaluation on the learner From b60cefe8e93797f47d66bf3ff23daadf934f5a9e Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 16:51:50 +0100 Subject: [PATCH 071/139] fix: env wrappers fix --- mava/utils/make_env.py | 4 ++-- mava/wrappers/gym.py | 22 ++++++++++------------ 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index e49d6344b..5755cc03c 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -36,7 +36,7 @@ from jumanji.environments.routing.robot_warehouse.generator import ( RandomGenerator as RwareRandomGenerator, ) -from lbforaging.foraging import environment as gym_lbf +from lbforaging.foraging import ForagingEnv as gym_ForagingEnv from omegaconf import DictConfig from rware.warehouse import Warehouse as gym_Warehouse @@ -76,7 +76,7 @@ _gym_registry = { "RobotWarehouse": (gym_Warehouse, GymWrapper), - "LevelBasedForaging": (gym_lbf, GymLBFWrapper), + "LevelBasedForaging": (gym_ForagingEnv, GymLBFWrapper), } diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 18d3ede73..35f3d2335 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -219,19 +219,17 @@ def async_multiagent_worker( # noqa CCR001 autoreset = False pipe.send(((observation, info), True)) elif command == "step": - if autoreset: + # Modified the step function to align with 'AutoResetWrapper'. + # The environment resets immediately upon termination or truncation. + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + if np.logical_or(terminated, truncated).all(): observation, info = env.reset() - reward, terminated, truncated = 0, False, False - else: - ( - observation, - reward, - terminated, - truncated, - info, - ) = env.step(data) - # The autoreset was modified to work with boolean arrays. 
- autoreset = np.logical_or(terminated, truncated).all() if shared_memory: write_to_shared_memory(observation_space, index, observation, shared_memory) From 21aafbffdc1740e99d9ad703e8adc4b5bb3cc8ef Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 16:53:02 +0100 Subject: [PATCH 072/139] fix: config changes --- mava/configs/env/lbf_gym.yaml | 2 +- mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml | 7 +++++-- mava/utils/logger.py | 2 +- 9 files changed, 37 insertions(+), 16 deletions(-) diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index 6981f3492..b0d783a7e 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -1,7 +1,7 @@ # ---Environment Configs--- defaults: - _self_ - - scenario: gym-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] + - scenario: gym-lbf-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] env_name: LevelBasedForaging # Used for logging purposes. diff --git a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml index 386431be4..904d94197 100644 --- a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml @@ -5,11 +5,14 @@ task_name: 10x10-3p-3f task_config: field_size: [10,10] sight: 10 - num_agents: 3 - max_food: 3 + players: 3 + max_num_food: 3 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml index 1a8380511..6b24e8de8 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml @@ -5,11 +5,14 @@ task_name: 15x15-3p-5f task_config: field_size: [15, 15] sight: 15 - num_agents: 3 - max_food: 5 + players: 3 + max_num_food: 5 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml index fa22f737b..acbb1f6de 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml @@ -5,11 +5,14 @@ task_name: 15x15-4p-3f task_config: field_size: [15, 15] sight: 15 - num_agents: 4 - max_food: 3 + players: 4 + max_num_food: 3 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml 
b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml index 28937215c..465385909 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml @@ -5,11 +5,14 @@ task_name: 15x15-4p-5f task_config: field_size: [15, 15] sight: 15 - num_agents: 4 - max_food: 5 + players: 4 + max_num_food: 5 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml index f0262eb8d..e6af1860f 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml @@ -5,11 +5,14 @@ task_name: 2s-10x10-3p-3f task_config: field_size: [10, 10] sight: 2 - num_agents: 3 - max_food: 3 + players: 3 + max_num_food: 3 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml index ffdc5be0e..3c318d3cf 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml @@ -5,11 +5,14 @@ task_name: 2s-8x8-2p-2f-coop task_config: field_size: [8, 8] # size of the grid to generate. sight: 2 # field of view of an agent. - num_agents: 2 # number of agents on the grid. - max_food: 2 # number of food in the environment. + players: 2 # number of agents on the grid. + max_num_food: 2 # number of food in the environment. max_player_level: 2 # maximum level of the agents (inclusive). force_coop: True # force cooperation between agents. max_episode_steps: 50 # max number of steps per episode. + min_player_level : 1 # minimum level of the agents (inclusive). 
+ min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml index 52519fecb..308b891dd 100644 --- a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml @@ -5,11 +5,14 @@ task_name: 8x8-2p-2f-coop task_config: field_size: [8, 8] sight: 8 - num_agents: 2 - max_food: 2 + players: 2 + max_num_food: 2 max_player_level: 2 force_coop: True max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/utils/logger.py b/mava/utils/logger.py index 4edad361e..1416c6061 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -337,7 +337,7 @@ def get_logger_path(config: DictConfig, logger_type: str) -> str: def describe(x: ArrayLike) -> Union[Dict[str, ArrayLike], ArrayLike]: """Generate summary statistics for an array of metrics (mean, std, min, max).""" - if not isinstance(x, jax.Array) or x.size <= 1: + if not isinstance(x, (jax.Array, np.ndarray)) or x.size <= 1: return x # np instead of jnp because we don't jit here From e09fd60f226f3de52ff4da949b7a53e069e9de21 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 16:55:06 +0100 Subject: [PATCH 073/139] chore: pre-commits --- mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 4 +++- mava/wrappers/gym.py | 2 -- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml index 904d94197..3aceaf74f 100644 --- a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml @@ -10,7 +10,7 @@ task_config: max_player_level: 2 force_coop: False max_episode_steps: 50 - min_player_level : 1 + min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml index 6b24e8de8..14953f3fc 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml @@ -10,7 +10,7 @@ task_config: max_player_level: 2 force_coop: False max_episode_steps: 50 - min_player_level : 1 + min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml index acbb1f6de..ef678025b 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml @@ -10,7 +10,7 @@ task_config: max_player_level: 2 force_coop: False max_episode_steps: 50 - min_player_level : 1 + min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml index 465385909..c4dcfb979 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml 
@@ -10,7 +10,7 @@ task_config: max_player_level: 2 force_coop: False max_episode_steps: 50 - min_player_level : 1 + min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml index e6af1860f..b094cda72 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml @@ -10,7 +10,7 @@ task_config: max_player_level: 2 force_coop: False max_episode_steps: 50 - min_player_level : 1 + min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml index 308b891dd..840bbf9f4 100644 --- a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml @@ -10,7 +10,7 @@ task_config: max_player_level: 2 force_coop: True max_episode_steps: 50 - min_player_level : 1 + min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 288249af5..0fe20165e 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -148,7 +148,9 @@ def get_action_and_value( # Prepare the data storage_time_start = time.time() next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_util.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) # Stack the metrics + metrics = jax.tree_util.tree_map( + lambda *x: jnp.asarray(x), *info["metrics"] + ) # Stack the metrics # Append data to storage storage.append( diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 35f3d2335..7ecfb4b27 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -203,7 +203,6 @@ def async_multiagent_worker( # noqa CCR001 env = env_fn() observation_space = env.observation_space action_space = env.action_space - autoreset = False parent_pipe.close() @@ -216,7 +215,6 @@ def async_multiagent_worker( # noqa CCR001 if shared_memory: write_to_shared_memory(observation_space, index, observation, shared_memory) observation = None - autoreset = False pipe.send(((observation, info), True)) elif command == "step": # Modified the step function to align with 'AutoResetWrapper'. 
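The auto-reset behaviour that the wrapper changes above converge on can be stated compactly: the vectorised worker resets an environment in the same step in which every agent is terminated or truncated, and the reset observation replaces the terminal one. The function below is a simplified stand-alone sketch of that idea, not the worker code itself.

import numpy as np

def step_with_autoreset(env, actions):
    """Step a multi-agent gym env and reset as soon as all agents are done.

    Mirrors the "step" branch of async_multiagent_worker: reward, terminated and
    truncated come from the final step of the episode, while the returned
    observation is already the first observation of the next episode.
    """
    obs, reward, terminated, truncated, info = env.step(actions)
    if np.logical_or(terminated, truncated).all():
        obs, info = env.reset()
    return obs, reward, terminated, truncated, info
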
From 2a6452d93b818cfb640e5e1939222ba9b79c3b36 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 17:01:00 +0100 Subject: [PATCH 074/139] fix: config file fixes --- mava/configs/env/lbf_gym.yaml | 2 +- mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml | 7 +++++-- mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml | 7 +++++-- 8 files changed, 36 insertions(+), 15 deletions(-) diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index 6981f3492..b0d783a7e 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -1,7 +1,7 @@ # ---Environment Configs--- defaults: - _self_ - - scenario: gym-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] + - scenario: gym-lbf-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] env_name: LevelBasedForaging # Used for logging purposes. diff --git a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml index 386431be4..3aceaf74f 100644 --- a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml @@ -5,11 +5,14 @@ task_name: 10x10-3p-3f task_config: field_size: [10,10] sight: 10 - num_agents: 3 - max_food: 3 + players: 3 + max_num_food: 3 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml index 1a8380511..14953f3fc 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml @@ -5,11 +5,14 @@ task_name: 15x15-3p-5f task_config: field_size: [15, 15] sight: 15 - num_agents: 3 - max_food: 5 + players: 3 + max_num_food: 5 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml index fa22f737b..ef678025b 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml @@ -5,11 +5,14 @@ task_name: 15x15-4p-3f task_config: field_size: [15, 15] sight: 15 - num_agents: 4 - max_food: 3 + players: 4 + max_num_food: 3 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml index 28937215c..c4dcfb979 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml @@ -5,11 +5,14 @@ 
task_name: 15x15-4p-5f task_config: field_size: [15, 15] sight: 15 - num_agents: 4 - max_food: 5 + players: 4 + max_num_food: 5 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml index f0262eb8d..b094cda72 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml @@ -5,11 +5,14 @@ task_name: 2s-10x10-3p-3f task_config: field_size: [10, 10] sight: 2 - num_agents: 3 - max_food: 3 + players: 3 + max_num_food: 3 max_player_level: 2 force_coop: False max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml index ffdc5be0e..3c318d3cf 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml @@ -5,11 +5,14 @@ task_name: 2s-8x8-2p-2f-coop task_config: field_size: [8, 8] # size of the grid to generate. sight: 2 # field of view of an agent. - num_agents: 2 # number of agents on the grid. - max_food: 2 # number of food in the environment. + players: 2 # number of agents on the grid. + max_num_food: 2 # number of food in the environment. max_player_level: 2 # maximum level of the agents (inclusive). force_coop: True # force cooperation between agents. max_episode_steps: 50 # max number of steps per episode. + min_player_level : 1 # minimum level of the agents (inclusive). 
+ min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml index 52519fecb..840bbf9f4 100644 --- a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml @@ -5,11 +5,14 @@ task_name: 8x8-2p-2f-coop task_config: field_size: [8, 8] sight: 8 - num_agents: 2 - max_food: 2 + players: 2 + max_num_food: 2 max_player_level: 2 force_coop: True max_episode_steps: 50 + min_player_level : 1 + min_food_level : null + max_food_level : null env_kwargs: {} # there are no scenario specific env_kwargs for this env From e2f36f91e19c4f67510824939e0d909bdf96b22c Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 17:01:15 +0100 Subject: [PATCH 075/139] fix: LBF import --- mava/utils/make_env.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index e49d6344b..5755cc03c 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -36,7 +36,7 @@ from jumanji.environments.routing.robot_warehouse.generator import ( RandomGenerator as RwareRandomGenerator, ) -from lbforaging.foraging import environment as gym_lbf +from lbforaging.foraging import ForagingEnv as gym_ForagingEnv from omegaconf import DictConfig from rware.warehouse import Warehouse as gym_Warehouse @@ -76,7 +76,7 @@ _gym_registry = { "RobotWarehouse": (gym_Warehouse, GymWrapper), - "LevelBasedForaging": (gym_lbf, GymLBFWrapper), + "LevelBasedForaging": (gym_ForagingEnv, GymLBFWrapper), } From 29396c98dc474447a6512e3a39bae8738c2cc453 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 17:01:28 +0100 Subject: [PATCH 076/139] fix: Async worker auto-resetting --- mava/wrappers/gym.py | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 18d3ede73..7b76fc157 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -203,8 +203,6 @@ def async_multiagent_worker( # noqa CCR001 env = env_fn() observation_space = env.observation_space action_space = env.action_space - autoreset = False - parent_pipe.close() try: @@ -216,22 +214,19 @@ def async_multiagent_worker( # noqa CCR001 if shared_memory: write_to_shared_memory(observation_space, index, observation, shared_memory) observation = None - autoreset = False pipe.send(((observation, info), True)) elif command == "step": - if autoreset: + # Modified the step function to align with 'AutoResetWrapper'. + # The environment resets immediately upon termination or truncation. + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + if np.logical_or(terminated, truncated).all(): observation, info = env.reset() - reward, terminated, truncated = 0, False, False - else: - ( - observation, - reward, - terminated, - truncated, - info, - ) = env.step(data) - # The autoreset was modified to work with boolean arrays. 
- autoreset = np.logical_or(terminated, truncated).all() if shared_memory: write_to_shared_memory(observation_space, index, observation, shared_memory) From 6de0b1e1d999b3e2dbea3264c02a4be33cf2512d Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 19 Jul 2024 17:11:57 +0100 Subject: [PATCH 077/139] chore: minor changes --- mava/configs/default_ff_ippo.yaml | 2 +- mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml | 2 +- mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml | 2 +- mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml | 2 +- mava/utils/make_env.py | 3 +-- 9 files changed, 9 insertions(+), 10 deletions(-) diff --git a/mava/configs/default_ff_ippo.yaml b/mava/configs/default_ff_ippo.yaml index c4aa6ea49..d942584ce 100644 --- a/mava/configs/default_ff_ippo.yaml +++ b/mava/configs/default_ff_ippo.yaml @@ -3,5 +3,5 @@ defaults: - arch: anakin - system: ppo/ff_ippo - network: mlp - - env: rware_gym + - env: rware - _self_ diff --git a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml index 3aceaf74f..a2150115b 100644 --- a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 3 max_player_level: 2 force_coop: False - max_episode_steps: 50 + max_episode_steps: 100 min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml index 14953f3fc..70031bad0 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 5 max_player_level: 2 force_coop: False - max_episode_steps: 50 + max_episode_steps: 100 min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml index ef678025b..b1fe6e4be 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 3 max_player_level: 2 force_coop: False - max_episode_steps: 50 + max_episode_steps: 100 min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml index c4dcfb979..9ce0100f5 100644 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml +++ b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 5 max_player_level: 2 force_coop: False - max_episode_steps: 50 + max_episode_steps: 100 min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml index b094cda72..fea817887 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 3 max_player_level: 2 force_coop: False - max_episode_steps: 50 + max_episode_steps: 100 min_player_level : 1 min_food_level : null max_food_level : null diff --git 
a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml index 3c318d3cf..b0cacb95c 100644 --- a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 2 # number of food in the environment. max_player_level: 2 # maximum level of the agents (inclusive). force_coop: True # force cooperation between agents. - max_episode_steps: 50 # max number of steps per episode. + max_episode_steps: 100 # max number of steps per episode. min_player_level : 1 # minimum level of the agents (inclusive). min_food_level : null max_food_level : null diff --git a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml index 840bbf9f4..3b9cee314 100644 --- a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml +++ b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml @@ -9,7 +9,7 @@ task_config: max_num_food: 2 max_player_level: 2 force_coop: True - max_episode_steps: 50 + max_episode_steps: 100 min_player_level : 1 min_food_level : null max_food_level : null diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 5755cc03c..21b595c06 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -231,8 +231,7 @@ def make_gym_env( Returns: Async environments. """ - base_env_name = config.env.scenario.name - env_maker, wrapper = _gym_registry[base_env_name] + env_maker, wrapper = _gym_registry[config.env.scenario.name] def create_gym_env(config: DictConfig, add_global_state: bool = False) -> Environment: env = env_maker(**config.env.scenario.task_config) From 7584ce5976fcdd5efda95a95a350438de77da8f0 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 22 Jul 2024 09:29:46 +0100 Subject: [PATCH 078/139] fixed: annotations and add agent id spaces --- mava/wrappers/gym.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 7b76fc157..0e1cf6529 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -21,6 +21,7 @@ import gymnasium import numpy as np +from gymnasium import spaces from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray @@ -178,14 +179,14 @@ def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]: obs = np.concatenate([self.agent_ids, obs], axis=1) return obs, reward, terminated, truncated, info - def modify_space(self, space: gymnasium.spaces) -> gymnasium.spaces: - if isinstance(space, gymnasium.spaces.Box): - new_shape = space.shape[0] + len(self.agent_ids) - return gymnasium.spaces.Box( - low=space.low, high=space.high, shape=new_shape, dtype=space.dtype + def modify_space(self, space: spaces.Space) -> spaces.Space: + if isinstance(space, spaces.Box): + new_shape = (space.shape[0] + len(self.agent_ids),) + return spaces.Box( + low=space.low[0], high=space.high[0], shape=new_shape, dtype=space.dtype ) - elif isinstance(space, gymnasium.spaces.Tuple): - return gymnasium.spaces.Tuple(self.modify_space(s) for s in space) + elif isinstance(space, spaces.Tuple): + return spaces.Tuple(self.modify_space(s) for s in space) else: raise ValueError(f"Space {type(space)} is not currently supported.") From e638e9fd36c793efd33ddd827843df3ef87f99ab Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 22 Jul 2024 09:35:54 +0100 Subject: [PATCH 079/139] fix: fixed the logging deadlock for sebulba --- mava/utils/logger.py | 5 ++++- 1 
file changed, 4 insertions(+), 1 deletion(-) diff --git a/mava/utils/logger.py b/mava/utils/logger.py index 4edad361e..bf502e25c 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -150,8 +150,11 @@ class NeptuneLogger(BaseLogger): def __init__(self, cfg: DictConfig, unique_token: str) -> None: tags = list(cfg.logger.kwargs.neptune_tag) project = cfg.logger.kwargs.neptune_project + mode = ( + "async" if cfg.arch.architecture_name == "anakin" else "sync" + ) # async logging leads to deadlocks in sebulba - self.logger = neptune.init_run(project=project, tags=tags) + self.logger = neptune.init_run(project=project, tags=tags, mode=mode) self.logger["config"] = stringify_unsupported(cfg) self.detailed_logging = cfg.logger.kwargs.detailed_neptune_logging From a85aa2fcbb373d554149a40bf0a29441ae15bad1 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 23 Jul 2024 09:34:42 +0100 Subject: [PATCH 080/139] chore: pre-commits --- mava/utils/make_env.py | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 3cf7982ea..887a987cb 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -228,40 +228,6 @@ def make_gigastep_env( return train_env, eval_env -def make_gym_env( - config: DictConfig, - num_env: int, - add_global_state: bool = False, -) -> gymnasium.vector.AsyncVectorEnv: - """ - Create a gymnasium environment. - - Args: - config (Dict): The configuration of the environment. - num_env (int) : The number of parallel envs to create. - add_global_state (bool): Whether to add the global state to the observation. Default False. - - Returns: - Async environments. - """ - env_maker, wrapper = _gym_registry[config.env.scenario.name] - - def create_gym_env(config: DictConfig, add_global_state: bool = False) -> Environment: - env = env_maker(**config.env.scenario.task_config) - wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) - if config.env.add_agent_id: - wrapped_env = GymAgentIDWrapper(wrapped_env) - wrapped_env = GymRecordEpisodeMetrics(wrapped_env) - return wrapped_env - - envs = gymnasium.vector.AsyncVectorEnv( - [lambda: create_gym_env(config, add_global_state) for _ in range(num_env)], - worker=async_multiagent_worker, - ) - - return envs - - def make_gym_env( config: DictConfig, num_env: int, From e504b478c7108d024a90f696e07b4e016a3a7ada Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 23 Jul 2024 09:54:13 +0100 Subject: [PATCH 081/139] pre-commit --- mava/wrappers/gym.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 0e1cf6529..520243e92 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -193,7 +193,7 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: # Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents -def async_multiagent_worker( # noqa CCR001 +def async_multiagent_worker( # CCR001 index: int, env_fn: Callable, pipe: Connection, From a19056b431fd93c0a5926988b8c8b08b2e9ddf59 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Thu, 25 Jul 2024 22:47:27 +0100 Subject: [PATCH 082/139] feat : major code restructer, non-blocking evalutors --- mava/configs/arch/sebulba.yaml | 6 +- mava/configs/default_ff_ippo.yaml | 2 +- mava/evaluator.py | 176 ++++++------ mava/systems/ppo/sebulba/ff_ippo.py | 418 ++++++++++------------------ mava/utils/make_env.py | 5 +- mava/utils/sebulba_utils.py | 166 
+++++++++++ mava/wrappers/__init__.py | 1 + mava/wrappers/gym.py | 63 +++++ 8 files changed, 466 insertions(+), 371 deletions(-) create mode 100644 mava/utils/sebulba_utils.py diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 0b539059b..9d21a51d3 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -2,18 +2,18 @@ architecture_name: sebulba # --- Training --- -num_envs: 32 # number of environments per thread. +num_envs: 2 # number of environments per thread. # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. -num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. +num_eval_episodes: 2 # Number of episodes to evaluate per evaluation. num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 # --- Sebulba devices config --- -n_threads_per_executor: 1 # num of different threads/env batches per actor +n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices diff --git a/mava/configs/default_ff_ippo.yaml b/mava/configs/default_ff_ippo.yaml index c4aa6ea49..d942584ce 100644 --- a/mava/configs/default_ff_ippo.yaml +++ b/mava/configs/default_ff_ippo.yaml @@ -3,5 +3,5 @@ defaults: - arch: anakin - system: ppo/ff_ippo - network: mlp - - env: rware_gym + - env: rware - _self_ diff --git a/mava/evaluator.py b/mava/evaluator.py index 2d0183878..e754899ae 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -17,7 +17,6 @@ import warnings from typing import Any, Callable, Dict, Protocol, Tuple, Union -import gymnasium import jax import jax.numpy as jnp import numpy as np @@ -35,7 +34,6 @@ Observation, ObservationGlobalState, RecActorApply, - SebulbaEvalFn, State, ) @@ -211,121 +209,109 @@ def eval_act_fn( return eval_act_fn -# todo : Update -def get_sebulba_ff_evaluator_fn( - env: gymnasium.Env, - apply_fn: ActorApply, +def get_sebulba_eval_fn( + env_maker: Callable, + act_fn: EvalActFn, config: DictConfig, np_rng: np.random.Generator, - log_win_rate: bool = False, -) -> SebulbaEvalFn: - """Get the evaluator function for feedforward networks. + absolute_metric: bool, +) -> EvalFn: + """Creates a function that can be used to evaluate agents on a given environment. Args: - env (Environment): An evironment instance for evaluation. - apply_fn (callable): Network forward pass method. - config (dict): Experiment configuration. + ---- + env: an environment that conforms to the mava environment spec. + act_fn: a function that takes in params, timestep, key and optionally a state + and returns actions and optionally a state (see `EvalActFn`). + config: the system config. + absolute_metric: whether or not this evaluator calculates the absolute_metric. + This determines how many evaluation episodes it does. """ + n_devices = jax.device_count() + eval_episodes = ( + config.arch.num_absolute_metric_eval_episodes + if absolute_metric + else config.arch.num_eval_episodes + ) - @jax.jit - def get_action( # todo explicetly put these on the learner? 
they should already be there - params: FrozenDict, - observation: Observation, - key: PRNGKey, - ) -> Array: - """Get action.""" - - pi = apply_fn(params, observation) - - if config.arch.evaluation_greedy: - action = pi.mode() - else: - action = pi.sample(seed=key) - - return action + n_parallel_envs = min(eval_episodes, config.arch.num_envs) + episode_loops = math.ceil(eval_episodes / n_parallel_envs) + env = env_maker(config, n_parallel_envs) - def eval_episodes(params: FrozenDict, key: PRNGKey) -> Any: - seeds = np_rng.integers(np.iinfo(np.int64).max, size=env.num_envs).tolist() - obs, info = env.reset(seed=seeds) - dones = np.full(env.num_envs, False) - eval_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) + # Warnings if num eval episodes is not divisible by num parallel envs. + if eval_episodes % n_parallel_envs != 0: + warnings.warn( + f"Number of evaluation episodes ({eval_episodes}) is not divisible by `num_envs` * " + f"`num_devices` ({n_parallel_envs} * {n_devices}). Some extra evaluations will be " + f"executed. New number of evaluation episodes = {episode_loops * n_parallel_envs}", + stacklevel=2, + ) - while not dones.all(): - key, policy_key = jax.random.split(key) + def eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) -> Metrics: + """Evaluates the given params on an environment and returns relevent metrics. - obs = jax.device_put(jnp.stack(obs, axis=1)) - action_mask = jax.device_put(np.stack(info["actions_mask"])) + Metrics are collected by the `RecordEpisodeMetrics` wrapper: episode return and length, + also win rate for environments that support it. - actions = get_action(params, Observation(obs, action_mask), policy_key) - cpu_action = jax.device_get(actions) + Returns: Dict[str, Array] - dictionary of metric name to metric values for each episode. 
+ """ - obs, reward, terminated, truncated, info = env.step(cpu_action.swapaxes(0, 1)) + def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: + """Simulates `num_envs` episodes.""" - next_metrics = jax.tree_map(lambda *x: jnp.asarray(x), *info["metrics"]) + seeds = np_rng.integers(np.iinfo(np.int32).max, size=n_parallel_envs).tolist() + ts = env.reset(seed=seeds) - next_dones = next_metrics["is_terminal_step"] + timesteps = [ts] - update_flags = np.logical_and(next_dones, np.invert(dones)) + actor_state = init_act_state + finished_eps = ts.last() - update_metrics = lambda new_metric, old_metric, update_flags=update_flags: np.where( - (update_flags), new_metric, old_metric - ) + while not finished_eps.all(): + key, act_key = jax.random.split(key) + action, actor_state = act_fn(params, ts, act_key, actor_state) + cpu_action = jax.device_get(action).swapaxes(0, 1) + ts = env.step(cpu_action) + timesteps.append(ts) - eval_metrics = jax.tree_map(update_metrics, next_metrics, eval_metrics) + finished_eps = np.logical_or(finished_eps, ts.last()) - dones = np.logical_or(dones, next_dones) - eval_metrics.pop("is_terminal_step") + timesteps = jax.tree.map(lambda *x: np.stack(x), *timesteps) - return eval_metrics + metrics = timesteps.extras + if config.env.log_win_rate: + metrics["won_episode"] = timesteps.extras["won_episode"] - return eval_episodes + # find the first instance of done to get the metrics at that timestep, we don't + # care about subsequent steps because we only the results from the first episode + done_idx = jnp.argmax(timesteps.last(), axis=0) + metrics = jax.tree_map(lambda m: m[done_idx, jnp.arange(n_parallel_envs)], metrics) + del metrics["is_terminal_step"] # uneeded for logging + return key, metrics -def make_sebulba_eval_fns( - eval_env_fn: Callable, - network_apply_fn: Union[ActorApply, RecActorApply], - config: DictConfig, - np_rng: np.random.Generator, - add_global_state: bool = False, -) -> Tuple[SebulbaEvalFn, SebulbaEvalFn]: - """Initialize evaluator functions for reinforcement learning. + # This loop is important because we don't want too many parallel envs. + # So in evaluation we have num_envs parallel envs and loop enough times + # so that we do at least `eval_episodes` number of episodes. + metrics = [] + for _ in range(episode_loops): + key, metric = _episode(key) + metrics.append(metric) + + metrics: Metrics = jax.tree_map( + lambda *x: jnp.array(x).reshape(-1), *metrics + ) # flatten metrics + return metrics - Args: - eval_env_fn (Environment): The function to Create the eval envs. - network_apply_fn (Union[ActorApply,RecActorApply]): Creates a policy to sample. - config (DictConfig): The configuration settings for the evaluation. - use_recurrent_net (bool, optional): Whether to use a rnn. Defaults to False. - scanned_rnn (Optional[nn.Module], optional): The rnn module. - Required if `use_recurrent_net` is True. Defaults to None. - - Returns: - Tuple[SebulbaEvalFn, SebulbaEvalFn]: A tuple of two evaluation functions: - one for use during training and one for absolute metrics. - - Raises: - AssertionError: If `use_recurrent_net` is True but `scanned_rnn` is not provided. 
- """ - eval_env, absolute_eval_env = ( - eval_env_fn(config, config.arch.num_eval_episodes, add_global_state=add_global_state), - eval_env_fn(config, config.arch.num_eval_episodes * 10, add_global_state=add_global_state), - ) + def timed_eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) -> Metrics: + """Wrapper around eval function to time it and add in steps per second metric.""" + start_time = time.time() - # Check if win rate is required for evaluation. - log_win_rate = config.env.log_win_rate + metrics = eval_fn(params, key, init_act_state) - evaluator = get_sebulba_ff_evaluator_fn( - eval_env, - network_apply_fn, # type: ignore - config, - np_rng, - log_win_rate, # type: ignore - ) - absolute_metric_evaluator = get_sebulba_ff_evaluator_fn( - absolute_eval_env, - network_apply_fn, # type: ignore - config, - np_rng, - log_win_rate, # type: ignore - ) + end_time = time.time() + total_timesteps = jnp.sum(metrics["episode_length"]) + metrics["steps_per_second"] = total_timesteps / (end_time - start_time) + return metrics - return evaluator, absolute_metric_evaluator + return timed_eval_fn diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index f3a912f5d..fedc7f31d 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -15,9 +15,8 @@ import copy import queue import threading -import time -from collections import deque -from typing import Any, Dict, List, Tuple +from queue import Queue +from typing import Any, Dict, List, Sequence, Tuple import chex import flax @@ -33,7 +32,8 @@ from optax._src.base import OptState from rich.pretty import pprint -from mava.evaluator import make_sebulba_eval_fns as make_eval_fns +from mava.evaluator import get_sebulba_eval_fn as get_eval_fn +from mava.evaluator import make_ff_eval_act_fn from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition @@ -42,12 +42,14 @@ CriticApply, ExperimentOutput, Observation, + SebulbaEvalFn, SebulbaLearnerFn, ) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer -from mava.utils.jax_utils import merge_leading_dims, unreplicate_n_dims +from mava.utils.jax_utils import merge_leading_dims from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.sebulba_utils import ParamsSource, Pipeline, ThreadLifetime from mava.utils.total_timestep_checker import sebulba_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -56,12 +58,12 @@ def rollout( key: chex.PRNGKey, config: DictConfig, - rollout_queue: queue.Queue, - params_queue: queue.Queue, + rollout_pipeline: Pipeline, + params_source: ParamsSource, apply_fns: Tuple, - learner_devices: List, actor_device_id: int, seeds: List[int], + thread_lifetime: ThreadLifetime, ) -> None: # setup env = environments.make_gym_env(config, config.arch.num_envs) @@ -78,137 +80,80 @@ def get_action_and_value( """Get action and value.""" key, subkey = jax.random.split(key) - actor_policy = actor_apply_fn(params.actor_params, observation) # TODO: check vmapiing + actor_policy = actor_apply_fn(params.actor_params, observation) action = actor_policy.sample(seed=subkey) log_prob = actor_policy.log_prob(action) value = critic_apply_fn(params.critic_params, observation).squeeze() return action, log_prob, value, key - # Define queues to track 
time - params_queue_get_time: deque = deque(maxlen=1) - rollout_time: deque = deque(maxlen=1) - rollout_queue_put_time: deque = deque(maxlen=1) - next_obs, info = env.reset(seed=seeds) - next_dones = jnp.zeros((config.arch.num_envs, config.system.num_agents), dtype=jax.numpy.bool_) + timestep = env.reset(seed=seeds) + next_dones = jax.tree_util.tree_map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + timestep.last(), + ) move_to_device = lambda x: jax.device_put(x, device=current_actor_device) - shard_split_payload = lambda x, axis: jax.device_put_sharded( - jnp.split(x, len(learner_devices), axis=axis), devices=learner_devices - ) - # Loop till the learner has finished training - for _update in range(config.system.num_updates): - inference_time: float = 0 - storage_time: float = 0 - env_send_time: float = 0 - - # Get the latest parameters from the learner - params_queue_get_time_start = time.time() - params = params_queue.get() - params_queue_get_time.append(time.time() - params_queue_get_time_start) - + while not thread_lifetime.should_stop(): # Rollout - rollout_time_start = time.time() - storage: List = [] - + traj: List = [] # Loop over the rollout length - for _ in range(0, config.system.rollout_length): - # Cached for transition - cached_next_obs = move_to_device( - jnp.stack(next_obs, axis=1) - ) # (num_envs, num_agents, ...) - cached_next_dones = move_to_device(next_dones) # (num_envs, num_agents) - cashed_action_mask = move_to_device( - np.stack(info["actions_mask"]) - ) # (num_envs, num_agents, num_actions) - - full_observation = Observation(cached_next_obs, cashed_action_mask) + for _ in range(config.system.rollout_length): + # Get the latest parameters from the learner + params = params_source.get() + + cached_next_obs = jax.tree.map(move_to_device, timestep.observation) + cached_next_dones = move_to_device(next_dones) + # Get action and value - inference_time_start = time.time() ( action, log_prob, value, key, - ) = get_action_and_value(params, full_observation, key) + ) = get_action_and_value(params, cached_next_obs, key) # Step the environment - inference_time += time.time() - inference_time_start - env_send_time_start = time.time() cpu_action = jax.device_get(action) - next_obs, next_reward, terminated, truncated, info = env.step( + timestep = env.step( cpu_action.swapaxes(0, 1) ) # (num_env, num_agents) --> (num_agents, num_env) - env_send_time += time.time() - env_send_time_start - # Prepare the data - storage_time_start = time.time() - next_dones = np.logical_or(terminated, truncated) - metrics = jax.tree_util.tree_map( - lambda *x: jnp.asarray(x), *info["metrics"] - ) # Stack the metrics + next_dones = jax.tree_util.tree_map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + timestep.last(), + ) # Append data to storage - storage.append( + traj.append( PPOTransition( done=cached_next_dones, action=action, value=value, - reward=next_reward, + reward=timestep.reward, log_prob=log_prob, - obs=full_observation, - info=metrics, + obs=cached_next_obs, + info=timestep.extras, ) ) - storage_time += time.time() - storage_time_start - rollout_time.append(time.time() - rollout_time_start) - - parse_timer = time.time() - - # Prepare data to share with learner - # [PPOTransition() * rollout_len] --> PPOTransition[done=(rollout_len, num_envs, num_agents) - # , action=(rollout_len, num_envs, num_agents, num_actions), ...] 
- stacked_storage = jax.tree_util.tree_map(lambda *xs: jnp.stack(xs), *storage) - - # Split the arrays over the different learner_devices on the num_envs axis - - sharded_storage = jax.tree_util.tree_map( - lambda x: shard_split_payload(x, 1), stacked_storage - ) # (num_learner_devices, rollout_len, num_envs, num_agents, ...) - - # (num_learner_devices, num_envs, num_agents, ...) - sharded_next_obs = shard_split_payload(jnp.stack(next_obs, axis=1), 0) - sharded_next_action_mask = shard_split_payload(np.stack(info["actions_mask"]), 0) - sharded_next_done = shard_split_payload(next_dones, 0) - - # Pack the obs and action mask - payload_obs = Observation(sharded_next_obs, sharded_next_action_mask) - - # For debugging - speed_info = { # noqa F841 - "rollout_time": np.mean(rollout_time), - "params_queue_get_time": np.mean(params_queue_get_time), - "action_inference": inference_time, - "storage_time": storage_time, - "env_step_time": env_send_time, - "rollout_queue_put_time": ( - np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0 - ), - "parse_time": time.time() - parse_timer, - } - - payload = ( - sharded_storage, - payload_obs, - sharded_next_done, - ) + + # todo: replace with the record timer + # speed_info = { # F841 + # "rollout_time": np.mean(rollout_time), + # "params_queue_get_time": np.mean(params_queue_get_time), + # "action_inference": inference_time, + # "storage_time": storage_time, + # W "env_step_time": env_send_time, + # "rollout_queue_put_time": ( + # np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0 + # ), + # "parse_time": time.time() - parse_timer, + # } # Put data in the rollout queue to share it with the learner - rollout_queue_put_time_start = time.time() - rollout_queue.put(payload) - rollout_queue_put_time.append(time.time() - rollout_queue_put_time_start) + rollout_pipeline.put(traj, timestep.observation, next_dones) def get_learner_fn( @@ -397,11 +342,8 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) # SHUFFLE MINIBATCHES - batch_size = ( - config.system.rollout_length - * (config.arch.num_envs // len(config.arch.learner_device_ids)) - * len(config.arch.executor_device_ids) - * config.arch.n_threads_per_executor + batch_size = config.system.rollout_length * ( + config.arch.num_envs // len(config.arch.learner_device_ids) ) permutation = jax.random.permutation(shuffle_key, batch_size) batch = (traj_batch, advantages, targets) @@ -435,7 +377,7 @@ def _critic_loss_fn( def learner_fn( learner_state: LearnerState, traj_batch: PPOTransition, - last_obs: chex.Array, + last_obs: Observation, last_dones: chex.Array, ) -> ExperimentOutput[LearnerState]: """Learner function. 
@@ -467,6 +409,37 @@ def learner_fn( return learner_fn +def evaluate( + logger: MavaLogger, + payload_queue: Queue, + evaluator: SebulbaEvalFn, + thread_lifetime: ThreadLifetime, + steps_per_rollout: int, + key: chex.PRNGKey, +): + eval_step = 1 + + while not thread_lifetime.should_stop(): + metrics, params = payload_queue.get() + t = int(steps_per_rollout * (eval_step + 1)) + + episode_metrics, train_metrics = jax.tree.map(lambda *x: np.asarray(x), *metrics) + episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) + + if ep_completed: + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) + + key, eval_key = jax.random.split(key, 2) + episode_metrics = evaluator(params.actor_params, eval_key, {}) + logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) + + # todo add checkpointing + episode_return = jnp.mean(episode_metrics["episode_return"]) + + eval_step += 1 + + def learner_setup( keys: chex.Array, config: DictConfig, learner_devices: List ) -> Tuple[ @@ -572,14 +545,14 @@ def run_experiment(_config: DictConfig) -> float: # Sanity check of config assert ( config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments must to be divisible by the number of learners " + ), "The number of environments must to be divisible by the number of learners." assert ( int(config.arch.num_envs / len(config.arch.learner_device_ids)) * config.arch.n_threads_per_executor % config.system.num_minibatches == 0 - ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches" + ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches." # Setup learner. learn, apply_fns, learner_state = learner_setup( @@ -590,8 +563,10 @@ def run_experiment(_config: DictConfig) -> float: np_rng = np.random.default_rng(config.system.seed) # Setup evaluator. - evaluator, absolute_metric_evaluator = make_eval_fns( - environments.make_gym_env, apply_fns[0], config, np_rng + # One key per device for evaluation. + eval_act_fn = make_ff_eval_act_fn(apply_fns[0], config) + evaluator = get_eval_fn( + environments.make_gym_env, eval_act_fn, config, np_rng, absolute_metric=False ) # Calculate total timesteps. @@ -601,18 +576,9 @@ def run_experiment(_config: DictConfig) -> float: ), "Number of updates per evaluation must be less than total number of updates." # Calculate number of updates per evaluation. config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation - config.arch.num_evaluation, remaining_updates = divmod( - config.system.num_updates, config.system.num_updates_per_eval - ) - config.arch.num_evaluation += ( - remaining_updates != 0 - ) # Add an evaluation step if the num_updates is not a multiple of num_evaluation + steps_per_rollout = ( - len(config.arch.executor_device_ids) - * config.arch.n_threads_per_executor - * config.system.rollout_length - * config.arch.num_envs - * config.system.num_updates_per_eval + config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval ) # Logger setup @@ -632,167 +598,77 @@ def run_experiment(_config: DictConfig) -> float: # Executor setup and launch. 
unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) - params_queues: List = [] - rollout_queues: List = [] - - for _d_idx, d_id in enumerate( # Loop through each executor device - config.arch.executor_device_ids - ): - # Replicate params per executor device - device_params = jax.device_put(unreplicated_params, devices[d_id]) + params_sources: Sequence[ParamsSource] = [] + thread_lifetimes: Sequence[ThreadLifetime] = [] + pipeline = Pipeline(128, learner_devices) # TODO: ADD THE MAX PIPILINE QUEUE SIZE TO THE CONFIG + pipeline.start() + + # Create the actor threads + for d_idx, d_id in enumerate(config.arch.executor_device_ids): # Loop through each executor thread - for _thread_id in range(config.arch.n_threads_per_executor): - seeds = np_rng.integers(np.iinfo(np.int64).max, size=config.arch.num_envs).tolist() - params_queues.append(queue.Queue(maxsize=1)) - rollout_queues.append(queue.Queue(maxsize=1)) - params_queues[-1].put(device_params) + for thread_id in range(config.arch.n_threads_per_executor): + seeds = np_rng.integers(np.iinfo(np.int32).max, size=config.arch.num_envs).tolist() + + params_source = ParamsSource(unreplicated_params, devices[d_id]) + params_source.start() + params_sources.append(params_source) + + lifetime = ThreadLifetime() + thread_lifetimes.append(lifetime) + threading.Thread( target=rollout, args=( jax.device_put(key, devices[d_id]), config, - rollout_queues[-1], - params_queues[-1], + pipeline, + params_sources[-1], apply_fns, - learner_devices, d_id, seeds, + lifetime, ), + name=f"Actor-{thread_id + d_idx * config.arch.n_threads_per_executor}", ).start() - # Run experiment for the total number of updates. - max_episode_return = jnp.float32(0.0) - best_params = None - for eval_step in range(config.arch.num_evaluation): - training_start_time = time.time() - learner_speeds = [] - rollout_times = [] - - episode_metrics = [] - train_metrics = [] - - # Full or partial last eval step. 
- num_updates_in_eval = ( - remaining_updates - if eval_step == config.arch.num_evaluation - 1 and remaining_updates - else config.system.num_updates_per_eval - ) - for _update in range(num_updates_in_eval): - sharded_storages = [] - sharded_next_obss = [] - sharded_next_dones = [] - - rollout_start_time = time.time() - # Loop through each executor device - for d_idx, _ in enumerate(config.arch.executor_device_ids): - # Loop through each executor thread - for thread_id in range(config.arch.n_threads_per_executor): - # Get data from rollout queue - ( - sharded_storage, - sharded_next_obs, - sharded_next_done, - ) = rollout_queues[d_idx * config.arch.n_threads_per_executor + thread_id].get() - sharded_storages.append(sharded_storage) - sharded_next_obss.append(sharded_next_obs) - sharded_next_dones.append(sharded_next_done) - - rollout_times.append(time.time() - rollout_start_time) - - # Concatinate the returned trajectories on the n_env axis - sharded_storages = jax.tree_util.tree_map( - lambda *x: jnp.concatenate(x, axis=2), *sharded_storages - ) - sharded_next_obss = jax.tree_util.tree_map( - lambda *x: jnp.concatenate(x, axis=1), *sharded_next_obss - ) - sharded_next_dones = jnp.concatenate(sharded_next_dones, axis=1) - - learner_start_time = time.time() - learner_output = learn( - learner_state, sharded_storages, sharded_next_obss, sharded_next_dones - ) - learner_speeds.append(time.time() - learner_start_time) - - # Stack the metrics - episode_metrics.append(learner_output.episode_metrics) - train_metrics.append(learner_output.train_metrics) - - # Send updated params to executors - unreplicated_params = flax.jax_utils.unreplicate(learner_output.learner_state.params) - for d_idx, d_id in enumerate(config.arch.executor_device_ids): - device_params = jax.device_put(unreplicated_params, devices[d_id]) - for thread_id in range(config.arch.n_threads_per_executor): - params_queues[d_idx * config.arch.n_threads_per_executor + thread_id].put( - device_params - ) - - # Log the results of the training. - elapsed_time = time.time() - training_start_time - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics = jax.tree_util.tree_map(lambda *x: np.asarray(x), *episode_metrics) - episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time - - # Separately log timesteps, actoring metrics and training metrics. - speed_info = { - "total_time": elapsed_time, - "rollout_time": np.sum(rollout_times), - "learner_time": np.sum(learner_speeds), - "timestep": t, - } - logger.log(speed_info, t, eval_step, LogEvent.MISC) - if ep_completed: # only log episode metrics if an episode was completed in the rollout. - logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - train_metrics = jax.tree_util.tree_map(lambda *x: np.asarray(x), *train_metrics) - logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - - # Evaluation on the learner - evaluation_start_timer = time.time() - key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = evaluator( - unreplicate_n_dims(learner_output.learner_state.params.actor_params, 1), eval_key - ) - - # Log the results of the evaluation. 
- elapsed_time = time.time() - evaluation_start_timer - episode_return = jnp.mean(episode_metrics["episode_return"]) - - steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) - episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) - - if save_checkpoint: - # Save checkpoint of learner state - checkpointer.save( - timestep=steps_per_rollout * (eval_step + 1), - unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state, 1), - episode_return=episode_return, - ) - - if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(learner_output.learner_state.params.actor_params) - max_episode_return = episode_return - - # Update runner state to continue training. - learner_state = learner_output.learner_state - - # Record the performance for the final evaluation run. - eval_performance = float(jnp.mean(episode_metrics[config.env.eval_metric])) - - # Measure absolute metric. - if config.arch.absolute_metric: - start_time = time.time() + lifetime = ThreadLifetime() + evaluator_queue = Queue() # maxsize=1) + threading.Thread( + target=evaluate, + name="Evaluator", + args=(logger, evaluator_queue, evaluator, lifetime, steps_per_rollout, key), + ).start() + thread_lifetimes.append(lifetime) + + for eval_step in range( + config.arch.num_evaluation + ): # todo : replace :) if comment 3 is the way then this can be replaced with num_evaluation and the try catch in naother loop called num_updates per eval? + # should we have a loop over num actors? how much should we get? + # rn it trains over the output of a single actor + # we can leave it this way and think of other actor threads / devices as just a speed boost? I.e you should get ur desired batch sized base only on the num_envs * rollour_len ? + metrics: Sequence[Tuple[Dict, Dict]] = [] + _update = 0 + while _update != config.system.num_updates_per_eval: + try: + traj_batch, last_obs, last_dones = pipeline.get(block=True, timeout=1) + except queue.Empty: + continue + else: + learner_state, episode_metrics, train_metrics = learn( + learner_state, traj_batch, last_obs, last_dones + ) + metrics.append((episode_metrics, train_metrics)) + unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) - key_e, eval_key = jax.random.split(key_e, 2) - episode_metrics = absolute_metric_evaluator(unreplicate_n_dims(best_params, 1), eval_key) + for source in params_sources: + source.update(unreplicated_params) + _update += 1 - elapsed_time = time.time() - start_time - steps_per_eval = int(jnp.sum(episode_metrics["episode_length"])) + # Run the evaluator + evaluator_queue.put((metrics, unreplicated_params)) - t = int(steps_per_rollout * (eval_step + 1)) - episode_metrics["steps_per_second"] = steps_per_eval / elapsed_time - logger.log(episode_metrics, t, eval_step, LogEvent.ABSOLUTE) + for thread_lifetime in thread_lifetimes: + thread_lifetime.stop() # Stop the logger. logger.stop() diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 887a987cb..405cb73b8 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -49,6 +49,7 @@ GymAgentIDWrapper, GymLBFWrapper, GymRecordEpisodeMetrics, + GymToJumanji, GymWrapper, LbfWrapper, MabraxWrapper, @@ -232,7 +233,7 @@ def make_gym_env( config: DictConfig, num_env: int, add_global_state: bool = False, -) -> gymnasium.vector.AsyncVectorEnv: +) -> GymToJumanji: """ Create a gymnasium environment. 
@@ -259,6 +260,8 @@ def create_gym_env(config: DictConfig, add_global_state: bool = False) -> gymnas worker=async_multiagent_worker, ) + envs = GymToJumanji(envs) + return envs diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py new file mode 100644 index 000000000..073f735c5 --- /dev/null +++ b/mava/utils/sebulba_utils.py @@ -0,0 +1,166 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import queue +import threading +import time +from typing import Any, List, Sequence, Tuple, Union + +import jax +import jax.numpy as jnp +from chex import Array + +from mava.systems.ppo.types import Params, PPOTransition # todo: remove the ppo dependencies +from mava.types import Observation, ObservationGlobalState + + +# Copied from https://github.com/instadeepai/sebulba/blob/main/sebulba/core.py +class Pipeline(threading.Thread): + """ + The `Pipeline` shards trajectories into `learner_devices`, + ensuring trajectories are consumed in the right order to avoid being off-policy + and limit the max number of samples in device memory at one time to avoid OOM issues. + """ + + def __init__(self, max_size: int, learner_devices: List[jax.Device]): + """ + Initializes the pipeline with a maximum size and the devices to shard trajectories across. + + Args: + max_size: The maximum number of trajectories to keep in the pipeline. + learner_devices: The devices to shard trajectories across. + """ + super().__init__(name="Pipeline") + self.learner_devices = learner_devices + self.tickets_queue: queue.Queue = queue.Queue() + self._queue: queue.Queue = queue.Queue(maxsize=max_size) + + def run(self) -> None: + """ + This function ensures that trajectories on the queue are consumed in the right order. The + start_condition and end_condition are used to ensure that only 1 thread is processing an + item from the queue at one time, ensuring predictable memory usage. + """ + while True: # todo Thread lifetime + start_condition, end_condition = self.tickets_queue.get() + with end_condition: + with start_condition: + start_condition.notify() + end_condition.wait() + + def put( + self, + traj: Sequence[PPOTransition], + next_obs: Union[Observation, ObservationGlobalState], + next_dones: Array, + ) -> None: + """ + Put a trajectory on the queue to be consumed by the learner. + """ + start_condition, end_condition = (threading.Condition(), threading.Condition()) + with start_condition: + self.tickets_queue.put((start_condition, end_condition)) + start_condition.wait() # wait to be allowed to start + + # [PPOTransition()] * rollout_len --> PPOTransition[done=(rollout_len, num_envs, num_agents) + sharded_traj = jax.tree.map(lambda *x: self.shard_split_playload(jnp.stack(x), 1), *traj) + + # obs Tuple[(num_envs, num_agents, ...), ...] 
--> [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices + sharded_next_obs = jax.tree.map(self.shard_split_playload, next_obs) + + # dones (num_envs, num_agents) --> [(num_envs / num_learner_devices, num_agents)] * num_learner_devices + sharded_next_dones = self.shard_split_playload(next_dones, 0) + + self._queue.put((sharded_traj, sharded_next_obs, sharded_next_dones)) + + with end_condition: + end_condition.notify() # tell we have finish + + def qsize(self) -> int: + """Returns the number of trajectories in the pipeline.""" + return self._queue.qsize() + + def get( + self, block: bool = True, timeout: Union[float, None] = None + ) -> Tuple[PPOTransition, Union[Observation, ObservationGlobalState], Array]: + """Get a trajectory from the pipeline.""" + return self._queue.get(block, timeout) # type: ignore + + def shard_split_playload(self, payload: Any, axis: int = 0): + split_payload = jnp.split(payload, len(self.learner_devices), axis=axis) + return jax.device_put_sharded(split_payload, devices=self.learner_devices) + + +class ParamsSource(threading.Thread): + """ + A `ParamSource` is a component that allows networks params to be passed from a + `Learner` component to `Actor` components. + """ + + def __init__(self, init_value: Params, device: jax.Device): + super().__init__(name=f"ParamsSource-{device.id}") + self.value = jax.device_put(init_value, device) + self.device = device + self.new_value: queue.Queue = queue.Queue() + + def run(self) -> None: + """ + This function is responsible for updating the value of the `ParamSource` when a new value + is available. + """ + while True: + try: + waiting = self.new_value.get(block=True, timeout=1) + self.value = jax.device_put(jax.block_until_ready(waiting), self.device) + except queue.Empty: + continue + + def update(self, new_params: Params) -> None: + """ + Update the value of the `ParamSource` with a new value. + + Args: + new_params: The new value to update the `ParamSource` with. 
+ """ + self.new_value.put(new_params) + + def get(self) -> Params: + """Get the current value of the `ParamSource`.""" + return self.value + + +class RecordTimeTo: + def __init__(self, to: Any): + self.to = to + + def __enter__(self) -> None: + self.start = time.monotonic() + + def __exit__(self, *args: Any) -> None: + end = time.monotonic() + self.to.append(end - self.start) + + +class ThreadLifetime: + """Simple class for a mutable boolean that can be used to signal a thread to stop.""" + + def __init__(self): + self._stop = False + + def should_stop(self): + return self._stop + + def stop(self): + self._stop = True diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 550180ee5..a7b56c5da 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -20,6 +20,7 @@ GymAgentIDWrapper, GymLBFWrapper, GymRecordEpisodeMetrics, + GymToJumanji, GymWrapper, async_multiagent_worker, ) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 520243e92..5bfb24e8c 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -20,11 +20,15 @@ from typing import Any, Callable, Dict, Optional, Tuple, Union import gymnasium +import jax import numpy as np from gymnasium import spaces from gymnasium.vector.utils import write_to_shared_memory +from jumanji.types import StepType, TimeStep from numpy.typing import NDArray +from mava.types import Observation, ObservationGlobalState + # Filter out the warnings warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") @@ -191,6 +195,65 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: raise ValueError(f"Space {type(space)} is not currently supported.") +class GymToJumanji(gymnasium.Wrapper): + """Converts Gym outputs to Jumanji timesteps""" + + def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> TimeStep: + obs, info = self.env.reset(seed=seed, options=options) + + num_agents = len(self.env.single_action_space) + num_envs = self.env.num_envs + + ep_done = np.zeros(num_envs, dtype=float) + rewards = np.zeros((num_envs, num_agents), dtype=float) + + timestep = self._create_timestep(obs, ep_done, rewards, info) + + return timestep + + def step(self, action: list) -> TimeStep: + obs, rewards, terminated, truncated, info = self.env.step(action) + + ep_done = np.logical_or(terminated, truncated).all(axis=1) + + timestep = self._create_timestep(obs, ep_done, rewards, info) + + return timestep + + def _format_observation( + self, obs: NDArray, info: Dict + ) -> Union[Observation, ObservationGlobalState]: + """Create an observation from the raw observation and environment state.""" + + obs = np.array(obs).swapaxes( + 0, 1 + ) # (num_agents, num_envs, ...) -> (num_envs, num_agents, ...) 
+ action_mask = np.stack(info["actions_mask"]) + obs_data = {"agents_view": obs, "action_mask": action_mask} + + if "global_obs" in info: + global_obs = np.array(info["global_obs"]).swapaxes(0, 1) + obs_data["global_state"] = global_obs + return ObservationGlobalState(**obs_data) + else: + return Observation(**obs_data) + + def _create_timestep( + self, obs: NDArray, ep_done: NDArray, rewards: NDArray, info: Dict + ) -> TimeStep: + obs = self._format_observation(obs, info) + extras = jax.tree.map(lambda *x: np.stack(x), *info["metrics"]) + step_type = np.where(ep_done, StepType.LAST, StepType.MID) + + return TimeStep( + step_type=step_type, + reward=rewards, + discount=1.0 - ep_done, + observation=obs, + extras=extras, + ) + + # Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents def async_multiagent_worker( # CCR001 From fc80b91def01524c0ce7d333c012393bfb52325f Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Fri, 26 Jul 2024 22:37:36 +0100 Subject: [PATCH 083/139] chore: code cleanup and sps calcs and learner threads --- mava/configs/arch/sebulba.yaml | 9 +- mava/systems/ppo/sebulba/ff_ippo.py | 272 +++++++++++++++------------- mava/utils/sebulba_utils.py | 23 ++- mava/wrappers/gym.py | 4 +- 4 files changed, 169 insertions(+), 139 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 9d21a51d3..e38691780 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -2,13 +2,13 @@ architecture_name: sebulba # --- Training --- -num_envs: 2 # number of environments per thread. +num_envs: 32 # number of environments per thread. # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. -num_eval_episodes: 2 # Number of episodes to evaluate per evaluation. +num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 @@ -17,3 +17,8 @@ absolute_metric: True # Whether the absolute metric should be computed. For more n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices +Pilpeline_queue_size : 2 +# The size of the pipeline queue determines the extent of off-policy training allowed. A larger value permits more off-policy training. +# Too large of a value with too many actors will lead to all of the updates getting wasted in old episodes +# Too small of a value and the utility of having multiple actors is lost. +# A value of 1 leads to almost strictly on-policy training. diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index fedc7f31d..3f07adda8 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -13,7 +13,6 @@ # limitations under the License. 
import copy -import queue import threading from queue import Queue from typing import Any, Dict, List, Sequence, Tuple @@ -42,14 +41,13 @@ CriticApply, ExperimentOutput, Observation, - SebulbaEvalFn, SebulbaLearnerFn, ) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import merge_leading_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.sebulba_utils import ParamsSource, Pipeline, ThreadLifetime +from mava.utils.sebulba_utils import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.total_timestep_checker import sebulba_check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -69,6 +67,7 @@ def rollout( env = environments.make_gym_env(config, config.arch.num_envs) current_actor_device = jax.devices()[actor_device_id] actor_apply_fn, critic_apply_fn = apply_fns + num_agents, num_envs = config.system.num_agents, config.arch.num_envs # Define the util functions: select action function and prepare data to share it with learner. @jax.jit @@ -88,8 +87,9 @@ def get_action_and_value( return action, log_prob, value, key timestep = env.reset(seed=seeds) + next_dones = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + lambda x: jnp.repeat(x, num_agents).reshape(num_envs, -1), timestep.last(), ) @@ -99,61 +99,52 @@ def get_action_and_value( while not thread_lifetime.should_stop(): # Rollout traj: List = [] - # Loop over the rollout length - for _ in range(config.system.rollout_length): - # Get the latest parameters from the learner - params = params_source.get() - - cached_next_obs = jax.tree.map(move_to_device, timestep.observation) - cached_next_dones = move_to_device(next_dones) - - # Get action and value - ( - action, - log_prob, - value, - key, - ) = get_action_and_value(params, cached_next_obs, key) - - # Step the environment - cpu_action = jax.device_get(action) - timestep = env.step( - cpu_action.swapaxes(0, 1) - ) # (num_env, num_agents) --> (num_agents, num_env) - - next_dones = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), - timestep.last(), - ) + time_dict: Dict[str, List[float]] = {"single_rollout": [], "env_step_time": []} - # Append data to storage - traj.append( - PPOTransition( - done=cached_next_dones, - action=action, - value=value, - reward=timestep.reward, - log_prob=log_prob, - obs=cached_next_obs, - info=timestep.extras, + # Loop over the rollout length + with RecordTimeTo(time_dict["single_rollout"]): + for _ in range(config.system.rollout_length): + # Get the latest parameters from the learner + params = params_source.get() + + cached_next_obs = jax.tree.map(move_to_device, timestep.observation) + cached_next_dones = move_to_device(next_dones) + + # Get action and value + ( + action, + log_prob, + value, + key, + ) = get_action_and_value(params, cached_next_obs, key) + + # Step the environment + cpu_action = jax.device_get(action) + + with RecordTimeTo(time_dict["env_step_time"]): + timestep = env.step( + cpu_action.swapaxes(0, 1) + ) # (num_env, num_agents) --> (num_agents, num_env) + + next_dones = jax.tree_util.tree_map( + lambda x: jnp.repeat(x, num_agents).reshape(num_envs, -1), + timestep.last(), ) - ) - # todo: replace with the record timer - # speed_info = { # F841 - # "rollout_time": np.mean(rollout_time), - # "params_queue_get_time": 
np.mean(params_queue_get_time), - # "action_inference": inference_time, - # "storage_time": storage_time, - # W "env_step_time": env_send_time, - # "rollout_queue_put_time": ( - # np.mean(rollout_queue_put_time) if rollout_queue_put_time else 0 - # ), - # "parse_time": time.time() - parse_timer, - # } + # Append data to storage + traj.append( + PPOTransition( + done=cached_next_dones, + action=action, + value=value, + reward=timestep.reward, + log_prob=log_prob, + obs=cached_next_obs, + info=timestep.extras, + ) + ) - # Put data in the rollout queue to share it with the learner - rollout_pipeline.put(traj, timestep.observation, next_dones) + rollout_pipeline.put(traj, timestep.observation, next_dones, time_dict) def get_learner_fn( @@ -190,7 +181,7 @@ def _update_step( _ (Any): The current metrics info. """ - def _calculate_gae( # todo: lake sure this is appropriate + def _calculate_gae( traj_batch: PPOTransition, last_val: chex.Array, last_done: chex.Array ) -> Tuple[chex.Array, chex.Array]: def _get_advantages( @@ -303,7 +294,7 @@ def _critic_loss_fn( # pmean over devices. actor_grads, actor_loss_info = jax.lax.pmean( (actor_grads, actor_loss_info), - axis_name="device", # todo: pmean over learner devices not all + axis_name="device", ) # pmean over devices. @@ -394,8 +385,6 @@ def learner_fn( - env_state (LogEnvState): The environment state. - timesteps (TimeStep): The initial timestep in the initial trajectory. """ - - # todo: add update_batch_size learner_state, (episode_info, loss_info) = _update_step( learner_state, traj_batch, last_obs, last_dones ) @@ -409,37 +398,6 @@ def learner_fn( return learner_fn -def evaluate( - logger: MavaLogger, - payload_queue: Queue, - evaluator: SebulbaEvalFn, - thread_lifetime: ThreadLifetime, - steps_per_rollout: int, - key: chex.PRNGKey, -): - eval_step = 1 - - while not thread_lifetime.should_stop(): - metrics, params = payload_queue.get() - t = int(steps_per_rollout * (eval_step + 1)) - - episode_metrics, train_metrics = jax.tree.map(lambda *x: np.asarray(x), *metrics) - episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - - if ep_completed: - logger.log(episode_metrics, t, eval_step, LogEvent.ACT) - logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - - key, eval_key = jax.random.split(key, 2) - episode_metrics = evaluator(params.actor_params, eval_key, {}) - logger.log(episode_metrics, t, eval_step, LogEvent.EVAL) - - # todo add checkpointing - episode_return = jnp.mean(episode_metrics["episode_return"]) - - eval_step += 1 - - def learner_setup( keys: chex.Array, config: DictConfig, learner_devices: List ) -> Tuple[ @@ -530,6 +488,46 @@ def learner_setup( return learn, apply_fns, init_learner_state +def learner( + learn: SebulbaLearnerFn[LearnerState, PPOTransition], + learner_state: LearnerState, + config: DictConfig, + learner_queue: Queue, + pipeline: Pipeline, + params_sources: Sequence[ParamsSource], +) -> None: + for _eval_step in range(config.arch.num_evaluation): + metrics: List[Tuple[Dict, Dict]] = [] + rollout_times: List[Dict] = [] + eval_times: Dict[str, List[float]] = {"evaluator_blocked_time": [], "evaluation_time": []} + + for _update in range(config.system.num_updates_per_eval): + with RecordTimeTo(eval_times["evaluator_blocked_time"]): + traj_batch, last_obs, last_dones, rollout_time = pipeline.get(block=True) + + with RecordTimeTo(eval_times["evaluation_time"]): + learner_state, episode_metrics, train_metrics = learn( + learner_state, traj_batch, last_obs, last_dones + ) + + 
metrics.append((episode_metrics, train_metrics)) + rollout_times.append(rollout_time) + + unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) + + for source in params_sources: + source.update(unreplicated_params) + + # Pass to the evaluator + episode_metrics, train_metrics = jax.tree.map(lambda *x: np.asarray(x), *metrics) + + rollout_times = jax.tree.map(lambda *x: np.mean(x), *rollout_times) + times_dict = rollout_times | eval_times + times_dict = jax.tree.map(np.mean, times_dict, is_leaf=lambda x: isinstance(x, list)) + + learner_queue.put((episode_metrics, train_metrics, learner_state, times_dict)) + + def run_experiment(_config: DictConfig) -> float: """Runs experiment.""" config = copy.deepcopy(_config) @@ -597,10 +595,10 @@ def run_experiment(_config: DictConfig) -> float: ) # Executor setup and launch. - unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) - params_sources: Sequence[ParamsSource] = [] - thread_lifetimes: Sequence[ThreadLifetime] = [] - pipeline = Pipeline(128, learner_devices) # TODO: ADD THE MAX PIPILINE QUEUE SIZE TO THE CONFIG + unreplicated_inital_params = flax.jax_utils.unreplicate(learner_state.params) + params_sources: List[ParamsSource] = [] + thread_lifetimes: List[ThreadLifetime] = [] + pipeline = Pipeline(config.arh.Pilpeline_queue_size, learner_devices) pipeline.start() # Create the actor threads @@ -609,7 +607,7 @@ def run_experiment(_config: DictConfig) -> float: for thread_id in range(config.arch.n_threads_per_executor): seeds = np_rng.integers(np.iinfo(np.int32).max, size=config.arch.num_envs).tolist() - params_source = ParamsSource(unreplicated_params, devices[d_id]) + params_source = ParamsSource(unreplicated_inital_params, devices[d_id]) params_source.start() params_sources.append(params_source) @@ -631,45 +629,67 @@ def run_experiment(_config: DictConfig) -> float: name=f"Actor-{thread_id + d_idx * config.arch.n_threads_per_executor}", ).start() - lifetime = ThreadLifetime() - evaluator_queue = Queue() # maxsize=1) + learner_queue: Queue = Queue() threading.Thread( - target=evaluate, - name="Evaluator", - args=(logger, evaluator_queue, evaluator, lifetime, steps_per_rollout, key), + target=learner, + name="Learner", + args=(learn, learner_state, config, learner_queue, pipeline, params_sources), ).start() - thread_lifetimes.append(lifetime) - - for eval_step in range( - config.arch.num_evaluation - ): # todo : replace :) if comment 3 is the way then this can be replaced with num_evaluation and the try catch in naother loop called num_updates per eval? - # should we have a loop over num actors? how much should we get? - # rn it trains over the output of a single actor - # we can leave it this way and think of other actor threads / devices as just a speed boost? I.e you should get ur desired batch sized base only on the num_envs * rollour_len ? 
- metrics: Sequence[Tuple[Dict, Dict]] = [] - _update = 0 - while _update != config.system.num_updates_per_eval: - try: - traj_batch, last_obs, last_dones = pipeline.get(block=True, timeout=1) - except queue.Empty: - continue - else: - learner_state, episode_metrics, train_metrics = learn( - learner_state, traj_batch, last_obs, last_dones - ) - metrics.append((episode_metrics, train_metrics)) - unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) - for source in params_sources: - source.update(unreplicated_params) - _update += 1 + max_episode_return = -jnp.inf + best_params = unreplicated_inital_params.actor_params + + for eval_step in range(config.arch.num_evaluation): + # Get the next set of params and metrics from the evaluator + episode_metrics, train_metrics, learner_state, times_dict = learner_queue.get() - # Run the evaluator - evaluator_queue.put((metrics, unreplicated_params)) + t = int(steps_per_rollout * (eval_step + 1)) + + times_dict["timestep"] = t + logger.log(times_dict, t, eval_step, LogEvent.MISC) + + episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / times_dict["single_rollout"] + if ep_completed: + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + + logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) + + unreplicated_actor_params = flax.jax_utils.unreplicate(learner_state.params.actor_params) + key, eval_key = jax.random.split(key, 2) + eval_metrics = evaluator(unreplicated_actor_params, eval_key, {}) + logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) + + episode_return = jnp.mean(eval_metrics["episode_return"]) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=learner_state, + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(unreplicated_actor_params) + max_episode_return = episode_return for thread_lifetime in thread_lifetimes: thread_lifetime.stop() + eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) + + # Measure absolute metric. + if config.arch.absolute_metric: + abs_metric_evaluator = get_eval_fn( + environments.make_gym_env, eval_act_fn, config, np_rng, absolute_metric=True + ) + key, eval_key = jax.random.split(key, 2) + eval_metrics = abs_metric_evaluator(best_params, eval_key, {}) + + t = int(steps_per_rollout * (eval_step + 1)) + logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) + # Stop the logger. logger.stop() diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index 073f735c5..a5c0bdc14 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -16,7 +16,7 @@ import queue import threading import time -from typing import Any, List, Sequence, Tuple, Union +from typing import Any, Dict, List, Sequence, Tuple, Union import jax import jax.numpy as jnp @@ -65,6 +65,7 @@ def put( traj: Sequence[PPOTransition], next_obs: Union[Observation, ObservationGlobalState], next_dones: Array, + time_dict: Dict, ) -> None: """ Put a trajectory on the queue to be consumed by the learner. @@ -77,13 +78,15 @@ def put( # [PPOTransition()] * rollout_len --> PPOTransition[done=(rollout_len, num_envs, num_agents) sharded_traj = jax.tree.map(lambda *x: self.shard_split_playload(jnp.stack(x), 1), *traj) - # obs Tuple[(num_envs, num_agents, ...), ...] 
--> [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices + # obs Tuple[(num_envs, num_agents, ...), ...] --> + # [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices sharded_next_obs = jax.tree.map(self.shard_split_playload, next_obs) - # dones (num_envs, num_agents) --> [(num_envs / num_learner_devices, num_agents)] * num_learner_devices + # dones (num_envs, num_agents) --> + # [(num_envs / num_learner_devices, num_agents)] * num_learner_devices sharded_next_dones = self.shard_split_playload(next_dones, 0) - self._queue.put((sharded_traj, sharded_next_obs, sharded_next_dones)) + self._queue.put((sharded_traj, sharded_next_obs, sharded_next_dones, time_dict)) with end_condition: end_condition.notify() # tell we have finish @@ -94,11 +97,11 @@ def qsize(self) -> int: def get( self, block: bool = True, timeout: Union[float, None] = None - ) -> Tuple[PPOTransition, Union[Observation, ObservationGlobalState], Array]: + ) -> Tuple[PPOTransition, Union[Observation, ObservationGlobalState], Array, Dict]: """Get a trajectory from the pipeline.""" return self._queue.get(block, timeout) # type: ignore - def shard_split_playload(self, payload: Any, axis: int = 0): + def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: split_payload = jnp.split(payload, len(self.learner_devices), axis=axis) return jax.device_put_sharded(split_payload, devices=self.learner_devices) @@ -111,7 +114,7 @@ class ParamsSource(threading.Thread): def __init__(self, init_value: Params, device: jax.Device): super().__init__(name=f"ParamsSource-{device.id}") - self.value = jax.device_put(init_value, device) + self.value: Params = jax.device_put(init_value, device) self.device = device self.new_value: queue.Queue = queue.Queue() @@ -156,11 +159,11 @@ def __exit__(self, *args: Any) -> None: class ThreadLifetime: """Simple class for a mutable boolean that can be used to signal a thread to stop.""" - def __init__(self): + def __init__(self) -> None: self._stop = False - def should_stop(self): + def should_stop(self) -> bool: return self._stop - def stop(self): + def stop(self) -> None: self._stop = True diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 5bfb24e8c..35bd674bd 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -198,7 +198,9 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: class GymToJumanji(gymnasium.Wrapper): """Converts Gym outputs to Jumanji timesteps""" - def reset(self, seed: Optional[int] = None, options: Optional[dict] = None) -> TimeStep: + def reset( + self, seed: Optional[list[int]] = None, options: Optional[list[dict]] = None + ) -> TimeStep: obs, info = self.env.reset(seed=seed, options=options) num_agents = len(self.env.single_action_space) From 18ec08f843460ca200f20d5cb40694bf87aac50b Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 29 Jul 2024 11:33:47 +0100 Subject: [PATCH 084/139] feat: shared time steps checker --- mava/configs/arch/sebulba.yaml | 2 +- mava/systems/ppo/anakin/ff_ippo.py | 4 +-- mava/systems/ppo/anakin/ff_mappo.py | 4 +-- mava/systems/ppo/anakin/rec_ippo.py | 4 +-- mava/systems/ppo/anakin/rec_mappo.py | 4 +-- mava/systems/ppo/sebulba/ff_ippo.py | 11 +++--- mava/systems/q_learning/anakin/rec_iql.py | 4 +-- mava/systems/sac/anakin/ff_isac.py | 4 +-- mava/systems/sac/anakin/ff_masac.py | 4 +-- mava/utils/total_timestep_checker.py | 44 ++++++----------------- 10 files changed, 29 insertions(+), 56 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml 
index e38691780..e9865460a 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -17,7 +17,7 @@ absolute_metric: True # Whether the absolute metric should be computed. For more n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices -Pilpeline_queue_size : 2 +pilpeline_queue_size : 5 # The size of the pipeline queue determines the extent of off-policy training allowed. A larger value permits more off-policy training. # Too large of a value with too many actors will lead to all of the updates getting wasted in old episodes # Too small of a value and the utility of having multiple actors is lost. diff --git a/mava/systems/ppo/anakin/ff_ippo.py b/mava/systems/ppo/anakin/ff_ippo.py index d0fb9c30f..49c969cdb 100644 --- a/mava/systems/ppo/anakin/ff_ippo.py +++ b/mava/systems/ppo/anakin/ff_ippo.py @@ -41,7 +41,7 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -475,7 +475,7 @@ def run_experiment(_config: DictConfig) -> float: evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) # Calculate total timesteps. - config = anakin_check_total_timesteps(config) + config = check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." diff --git a/mava/systems/ppo/anakin/ff_mappo.py b/mava/systems/ppo/anakin/ff_mappo.py index 20ae3272e..cafa42888 100644 --- a/mava/systems/ppo/anakin/ff_mappo.py +++ b/mava/systems/ppo/anakin/ff_mappo.py @@ -36,7 +36,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import merge_leading_dims, unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -459,7 +459,7 @@ def run_experiment(_config: DictConfig) -> float: evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) # Calculate total timesteps. - config = anakin_check_total_timesteps(config) + config = check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." 
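Aside (editorial), on the pipeline queue-size comments in the sebulba.yaml hunk above: a
rough standalone illustration, not Mava code, of why a bounded queue caps policy staleness.
The producer blocks as soon as the consumer falls `maxsize` batches behind, so rollouts can
never be more than `maxsize` updates old when they are consumed.

import queue
import threading
import time

rollout_queue: queue.Queue = queue.Queue(maxsize=2)  # cf. the pipeline queue size above

def actor() -> None:
    for step in range(6):
        rollout_queue.put(step)  # blocks once the learner is 2 rollouts behind
        print(f"actor produced rollout {step}")

def learner() -> None:
    for _ in range(6):
        time.sleep(0.1)  # pretend each update is slow
        print(f"learner consumed rollout {rollout_queue.get()}")

actor_thread = threading.Thread(target=actor)
actor_thread.start()
learner()
actor_thread.join()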
diff --git a/mava/systems/ppo/anakin/rec_ippo.py b/mava/systems/ppo/anakin/rec_ippo.py index a073d6dcb..230756295 100644 --- a/mava/systems/ppo/anakin/rec_ippo.py +++ b/mava/systems/ppo/anakin/rec_ippo.py @@ -50,7 +50,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -619,7 +619,7 @@ def run_experiment(_config: DictConfig) -> float: evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) # Calculate total timesteps. - config = anakin_check_total_timesteps(config) + config = check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." diff --git a/mava/systems/ppo/anakin/rec_mappo.py b/mava/systems/ppo/anakin/rec_mappo.py index 3e741f5c1..53ae7c65d 100644 --- a/mava/systems/ppo/anakin/rec_mappo.py +++ b/mava/systems/ppo/anakin/rec_mappo.py @@ -50,7 +50,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -615,7 +615,7 @@ def run_experiment(_config: DictConfig) -> float: evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) # Calculate total timesteps. - config = anakin_check_total_timesteps(config) + config = check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 3f07adda8..b9f83f20b 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -48,7 +48,7 @@ from mava.utils.jax_utils import merge_leading_dims from mava.utils.logger import LogEvent, MavaLogger from mava.utils.sebulba_utils import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime -from mava.utils.total_timestep_checker import sebulba_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -95,7 +95,7 @@ def get_action_and_value( move_to_device = lambda x: jax.device_put(x, device=current_actor_device) - # Loop till the learner has finished training + # Loop till the desired num_updates is reached. while not thread_lifetime.should_stop(): # Rollout traj: List = [] @@ -568,7 +568,7 @@ def run_experiment(_config: DictConfig) -> float: ) # Calculate total timesteps. - config = sebulba_check_total_timesteps(config) + config = check_total_timesteps(config) assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." 
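Aside (editorial): the shared checker this patch introduces (full body in the
total_timestep_checker.py hunk below) ties total_timesteps and num_updates together with a
single formula. A worked example of the Sebulba branch, where n_devices and
update_batch_size are both fixed to 1; the rollout length and timestep budget here are
illustrative numbers, not values from any config.

num_envs, rollout_length = 32, 128
n_devices, update_batch_size = 1, 1  # Sebulba branch of check_total_timesteps
total_timesteps = 1_000_000

num_updates = (
    total_timesteps // rollout_length // update_batch_size // num_envs // n_devices
)
assert num_updates == 244  # 1_000_000 // 128 == 7812, then 7812 // 32 == 244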
@@ -598,7 +598,7 @@ def run_experiment(_config: DictConfig) -> float: unreplicated_inital_params = flax.jax_utils.unreplicate(learner_state.params) params_sources: List[ParamsSource] = [] thread_lifetimes: List[ThreadLifetime] = [] - pipeline = Pipeline(config.arh.Pilpeline_queue_size, learner_devices) + pipeline = Pipeline(config.arch.pilpeline_queue_size, learner_devices) pipeline.start() # Create the actor threads @@ -712,6 +712,3 @@ def hydra_entry_point(cfg: DictConfig) -> float: if __name__ == "__main__": hydra_entry_point() - -# learner_output.episode_metrics.keys() -# dict_keys(['episode_length', 'episode_return']) diff --git a/mava/systems/q_learning/anakin/rec_iql.py b/mava/systems/q_learning/anakin/rec_iql.py index a8fa7964b..05b860d85 100644 --- a/mava/systems/q_learning/anakin/rec_iql.py +++ b/mava/systems/q_learning/anakin/rec_iql.py @@ -54,7 +54,7 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics @@ -533,7 +533,7 @@ def update_step( def run_experiment(cfg: DictConfig) -> float: # Add runtime variables to config cfg.arch.n_devices = len(jax.devices()) - cfg = anakin_check_total_timesteps(cfg) + cfg = check_total_timesteps(cfg) # Number of env steps before evaluating/logging. steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) diff --git a/mava/systems/sac/anakin/ff_isac.py b/mava/systems/sac/anakin/ff_isac.py index d0f243b3f..955725e00 100644 --- a/mava/systems/sac/anakin/ff_isac.py +++ b/mava/systems/sac/anakin/ff_isac.py @@ -51,7 +51,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics @@ -488,7 +488,7 @@ def update_step(carry: LearnerState, _: Any) -> Tuple[LearnerState, Tuple[Metric def run_experiment(cfg: DictConfig) -> float: # Add runtime variables to config cfg.arch.n_devices = len(jax.devices()) - cfg = anakin_check_total_timesteps(cfg) + cfg = check_total_timesteps(cfg) # Number of env steps before evaluating/logging. steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) diff --git a/mava/systems/sac/anakin/ff_masac.py b/mava/systems/sac/anakin/ff_masac.py index bf45f4b83..2df296be4 100644 --- a/mava/systems/sac/anakin/ff_masac.py +++ b/mava/systems/sac/anakin/ff_masac.py @@ -52,7 +52,7 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import anakin_check_total_timesteps +from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics @@ -506,7 +506,7 @@ def update_step(carry: LearnerState, _: Any) -> Tuple[LearnerState, Tuple[Metric def run_experiment(cfg: DictConfig) -> float: # Add runtime variables to config cfg.arch.n_devices = len(jax.devices()) - cfg = anakin_check_total_timesteps(cfg) + cfg = check_total_timesteps(cfg) # Number of env steps before evaluating/logging. 
steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) diff --git a/mava/utils/total_timestep_checker.py b/mava/utils/total_timestep_checker.py index 744451d1b..e48e40923 100644 --- a/mava/utils/total_timestep_checker.py +++ b/mava/utils/total_timestep_checker.py @@ -18,47 +18,23 @@ from omegaconf import DictConfig -def anakin_check_total_timesteps(config: DictConfig) -> DictConfig: +def check_total_timesteps(config: DictConfig) -> DictConfig: """Check if total_timesteps is set, if not, set it based on the other parameters""" - n_devices = len(jax.devices()) - if config.system.total_timesteps is None: - config.system.num_updates = int(config.system.num_updates) - config.system.total_timesteps = int( - n_devices - * config.system.num_updates - * config.system.rollout_length - * config.system.update_batch_size - * config.arch.num_envs - ) + if config.arch.architecture_name == "anakin": + n_devices = len(jax.devices()) + update_batch_size = config.system.update_batch_size else: - config.system.total_timesteps = int(config.system.total_timesteps) - config.system.num_updates = int( - config.system.total_timesteps - // config.system.rollout_length - // config.system.update_batch_size - // config.arch.num_envs - // n_devices - ) - print( - f"{Fore.RED}{Style.BRIGHT} Changing the number of updates " - + f"to {config.system.num_updates}: If you want to train" - + " for a specific number of updates, please set total_timesteps to None!" - + f"{Style.RESET_ALL}" - ) - return config - - -def sebulba_check_total_timesteps(config: DictConfig) -> DictConfig: - """Check if total_timesteps is set, if not, set it based on the other parameters""" + n_devices = 1 # We only use a single device's output when updating. + update_batch_size = 1 if config.system.total_timesteps is None: config.system.num_updates = int(config.system.num_updates) config.system.total_timesteps = int( - len(config.arch.executor_device_ids) - * config.arch.n_threads_per_executor + n_devices * config.system.num_updates * config.system.rollout_length + * update_batch_size * config.arch.num_envs ) else: @@ -66,9 +42,9 @@ def sebulba_check_total_timesteps(config: DictConfig) -> DictConfig: config.system.num_updates = int( config.system.total_timesteps // config.system.rollout_length + // update_batch_size // config.arch.num_envs - // config.arch.n_threads_per_executor - // len(config.arch.executor_device_ids) + // n_devices ) print( f"{Fore.RED}{Style.BRIGHT} Changing the number of updates " From 38e72291073fa9abeeffa719d48b661f861f18c4 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 29 Jul 2024 11:49:58 +0100 Subject: [PATCH 085/139] chore: removed unused eval type --- mava/types.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mava/types.py b/mava/types.py index 1c9f64640..1d5878c5a 100644 --- a/mava/types.py +++ b/mava/types.py @@ -157,7 +157,6 @@ class ExperimentOutput(NamedTuple, Generic[MavaState]): [MavaState, MavaTransition, chex.Array, chex.Array], ExperimentOutput[MavaState] ] EvalFn = Callable[[FrozenDict, chex.PRNGKey], ExperimentOutput[MavaState]] -SebulbaEvalFn = Callable[[FrozenDict, chex.PRNGKey], Dict] ActorApply = Callable[[FrozenDict, Observation], Distribution] CriticApply = Callable[[FrozenDict, Observation], Value] From 5a5e542c6b135bcc86d2d40c06ac6905e5f7b435 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 29 Jul 2024 11:53:22 +0100 Subject: [PATCH 086/139] chore: config file changes --- mava/configs/arch/sebulba.yaml | 3 ++- .../{default_ff_ippo_seb.yaml => 
default_ff_ippo_sebulba.yaml} | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) rename mava/configs/{default_ff_ippo_seb.yaml => default_ff_ippo_sebulba.yaml} (84%) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index e9865460a..5934bb3d5 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -10,8 +10,9 @@ evaluation_greedy: False # Evaluate the policy greedily. If True the policy will # from the logits. num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. +num_absolute_metric_eval_episodes: 320 # Number of episodes to evaluate the absolute metric (the final evaluation). absolute_metric: True # Whether the absolute metric should be computed. For more details - # on the absolute metric please see: https://arxiv.org/abs/2209.10485 +# on the absolute metric please see: https://arxiv.org/abs/2209.10485 # --- Sebulba devices config --- n_threads_per_executor: 2 # num of different threads/env batches per actor diff --git a/mava/configs/default_ff_ippo_seb.yaml b/mava/configs/default_ff_ippo_sebulba.yaml similarity index 84% rename from mava/configs/default_ff_ippo_seb.yaml rename to mava/configs/default_ff_ippo_sebulba.yaml index 204719232..3a7386969 100644 --- a/mava/configs/default_ff_ippo_seb.yaml +++ b/mava/configs/default_ff_ippo_sebulba.yaml @@ -3,5 +3,5 @@ defaults: - arch: sebulba - system: ppo/ff_ippo - network: mlp - - env: rware_gym + - env: lbf_gym - _self_ diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index b9f83f20b..946d92315 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -697,7 +697,7 @@ def run_experiment(_config: DictConfig) -> float: @hydra.main( - config_path="../../../configs", config_name="default_ff_ippo_seb.yaml", version_base="1.2" + config_path="../../../configs", config_name="default_ff_ippo_sebulba.yaml", version_base="1.2" ) def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" From dcff2a1c2f4a60272a13404a854ddb563b0b460c Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 29 Jul 2024 15:42:31 +0100 Subject: [PATCH 087/139] fix: fixed stalling at the end of training --- mava/configs/arch/sebulba.yaml | 8 ++--- mava/evaluator.py | 4 +-- mava/systems/ppo/sebulba/ff_ippo.py | 48 +++++++++++++++++----------- mava/types.py | 2 -- mava/utils/sebulba_utils.py | 49 ++++++++++++++++------------- 5 files changed, 63 insertions(+), 48 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 5934bb3d5..342e0ee29 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -9,8 +9,8 @@ evaluation_greedy: False # Evaluate the policy greedily. If True the policy will # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. -num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. -num_absolute_metric_eval_episodes: 320 # Number of episodes to evaluate the absolute metric (the final evaluation). +num_evaluation: 10 # Number of evenly spaced evaluations to perform during training. +num_absolute_metric_eval_episodes: 32 # Number of episodes to evaluate the absolute metric (the final evaluation). 
absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 @@ -18,8 +18,8 @@ absolute_metric: True # Whether the absolute metric should be computed. For more n_threads_per_executor: 2 # num of different threads/env batches per actor executor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices -pilpeline_queue_size : 5 +rollout_queue_size : 5 # The size of the pipeline queue determines the extent of off-policy training allowed. A larger value permits more off-policy training. # Too large of a value with too many actors will lead to all of the updates getting wasted in old episodes # Too small of a value and the utility of having multiple actors is lost. -# A value of 1 leads to almost strictly on-policy training. +# A value of 1 with a single actor leads to almost strictly on-policy training. diff --git a/mava/evaluator.py b/mava/evaluator.py index e754899ae..83e8841c3 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -215,7 +215,7 @@ def get_sebulba_eval_fn( config: DictConfig, np_rng: np.random.Generator, absolute_metric: bool, -) -> EvalFn: +) -> Tuple[EvalFn, Any]: """Creates a function that can be used to evaluate agents on a given environment. Args: @@ -314,4 +314,4 @@ def timed_eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) metrics["steps_per_second"] = total_timesteps / (end_time - start_time) return metrics - return timed_eval_fn + return timed_eval_fn, env diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 946d92315..2fd098a5d 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -145,6 +145,7 @@ def get_action_and_value( ) rollout_pipeline.put(traj, timestep.observation, next_dones, time_dict) + env.close() def get_learner_fn( @@ -408,7 +409,7 @@ def learner_setup( # create temporory envoirnments. env = environments.make_gym_env(config, config.arch.num_envs) # Get number of agents and actions. - action_space = env.single_action_space + action_space = env.unwrapped.single_action_space config.system.num_agents = len(action_space) config.system.num_actions = int(action_space[0].n) @@ -438,7 +439,7 @@ def learner_setup( ) # Initialise observation: Select only obs for a single agent. - init_obs = jnp.array([env.single_observation_space.sample()]) + init_obs = jnp.array([env.unwrapped.single_observation_space.sample()]) init_action_mask = jnp.ones((config.system.num_agents, config.system.num_actions)) init_x = Observation(init_obs, init_action_mask) @@ -563,7 +564,7 @@ def run_experiment(_config: DictConfig) -> float: # Setup evaluator. # One key per device for evaluation. eval_act_fn = make_ff_eval_act_fn(apply_fns[0], config) - evaluator = get_eval_fn( + evaluator, evaluator_envs = get_eval_fn( environments.make_gym_env, eval_act_fn, config, np_rng, absolute_metric=False ) @@ -596,25 +597,29 @@ def run_experiment(_config: DictConfig) -> float: # Executor setup and launch. 
unreplicated_inital_params = flax.jax_utils.unreplicate(learner_state.params) - params_sources: List[ParamsSource] = [] - thread_lifetimes: List[ThreadLifetime] = [] - pipeline = Pipeline(config.arch.pilpeline_queue_size, learner_devices) + + pipeline_lifetime = ThreadLifetime() + pipeline = Pipeline(config.arch.rollout_queue_size, learner_devices, pipeline_lifetime) pipeline.start() + params_sources: List[ParamsSource] = [] + actor_threads: List[threading.Thread] = [] + actors_lifetime = ThreadLifetime() + params_sources_lifetime = ThreadLifetime() + # Create the actor threads for d_idx, d_id in enumerate(config.arch.executor_device_ids): # Loop through each executor thread for thread_id in range(config.arch.n_threads_per_executor): seeds = np_rng.integers(np.iinfo(np.int32).max, size=config.arch.num_envs).tolist() - params_source = ParamsSource(unreplicated_inital_params, devices[d_id]) + params_source = ParamsSource( + unreplicated_inital_params, devices[d_id], params_sources_lifetime + ) params_source.start() params_sources.append(params_source) - lifetime = ThreadLifetime() - thread_lifetimes.append(lifetime) - - threading.Thread( + actor = threading.Thread( target=rollout, args=( jax.device_put(key, devices[d_id]), @@ -624,10 +629,12 @@ def run_experiment(_config: DictConfig) -> float: apply_fns, d_id, seeds, - lifetime, + actors_lifetime, ), name=f"Actor-{thread_id + d_idx * config.arch.n_threads_per_executor}", - ).start() + ) + actor.start() + actor_threads.append(actor) learner_queue: Queue = Queue() threading.Thread( @@ -674,14 +681,19 @@ def run_experiment(_config: DictConfig) -> float: best_params = copy.deepcopy(unreplicated_actor_params) max_episode_return = episode_return - for thread_lifetime in thread_lifetimes: - thread_lifetime.stop() - + evaluator_envs.close() eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) + # Make sure all of the actors are done befor closing the pipeline + actors_lifetime.stop() + for actor in actor_threads: + actor.join() + pipeline_lifetime.stop() + params_sources_lifetime.stop() + # Measure absolute metric. if config.arch.absolute_metric: - abs_metric_evaluator = get_eval_fn( + abs_metric_evaluator, abs_metric_evaluator_envs = get_eval_fn( environments.make_gym_env, eval_act_fn, config, np_rng, absolute_metric=True ) key, eval_key = jax.random.split(key, 2) @@ -689,7 +701,7 @@ def run_experiment(_config: DictConfig) -> float: t = int(steps_per_rollout * (eval_step + 1)) logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) - + abs_metric_evaluator_envs.close() # Stop the logger. 
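# Editor's note: the teardown order in this hunk is what fixes the end-of-training stall —
# signal the actors to stop and join them before stopping the pipeline and params sources,
# so no actor thread is left blocked handing a rollout to a learner that has already
# finished; the logger is stopped last, just below.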
logger.stop() diff --git a/mava/types.py b/mava/types.py index 1d5878c5a..fe51ce293 100644 --- a/mava/types.py +++ b/mava/types.py @@ -156,8 +156,6 @@ class ExperimentOutput(NamedTuple, Generic[MavaState]): SebulbaLearnerFn = Callable[ [MavaState, MavaTransition, chex.Array, chex.Array], ExperimentOutput[MavaState] ] -EvalFn = Callable[[FrozenDict, chex.PRNGKey], ExperimentOutput[MavaState]] - ActorApply = Callable[[FrozenDict, Observation], Distribution] CriticApply = Callable[[FrozenDict, Observation], Value] RecActorApply = Callable[ diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index a5c0bdc14..e1fd34f37 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -27,6 +27,19 @@ # Copied from https://github.com/instadeepai/sebulba/blob/main/sebulba/core.py +class ThreadLifetime: + """Simple class for a mutable boolean that can be used to signal a thread to stop.""" + + def __init__(self) -> None: + self._stop = False + + def should_stop(self) -> bool: + return self._stop + + def stop(self) -> None: + self._stop = True + + class Pipeline(threading.Thread): """ The `Pipeline` shards trajectories into `learner_devices`, @@ -34,7 +47,7 @@ class Pipeline(threading.Thread): and limit the max number of samples in device memory at one time to avoid OOM issues. """ - def __init__(self, max_size: int, learner_devices: List[jax.Device]): + def __init__(self, max_size: int, learner_devices: List[jax.Device], lifetime: ThreadLifetime): """ Initializes the pipeline with a maximum size and the devices to shard trajectories across. @@ -46,6 +59,7 @@ def __init__(self, max_size: int, learner_devices: List[jax.Device]): self.learner_devices = learner_devices self.tickets_queue: queue.Queue = queue.Queue() self._queue: queue.Queue = queue.Queue(maxsize=max_size) + self.lifetime = lifetime def run(self) -> None: """ @@ -53,12 +67,15 @@ def run(self) -> None: start_condition and end_condition are used to ensure that only 1 thread is processing an item from the queue at one time, ensuring predictable memory usage. """ - while True: # todo Thread lifetime - start_condition, end_condition = self.tickets_queue.get() - with end_condition: - with start_condition: - start_condition.notify() - end_condition.wait() + while not self.lifetime.should_stop(): + try: + start_condition, end_condition = self.tickets_queue.get(timeout=1) + with end_condition: + with start_condition: + start_condition.notify() + end_condition.wait() + except queue.Empty: + continue def put( self, @@ -112,18 +129,19 @@ class ParamsSource(threading.Thread): `Learner` component to `Actor` components. """ - def __init__(self, init_value: Params, device: jax.Device): + def __init__(self, init_value: Params, device: jax.Device, lifetime: ThreadLifetime): super().__init__(name=f"ParamsSource-{device.id}") self.value: Params = jax.device_put(init_value, device) self.device = device self.new_value: queue.Queue = queue.Queue() + self.lifetime = lifetime def run(self) -> None: """ This function is responsible for updating the value of the `ParamSource` when a new value is available. 
""" - while True: + while not self.lifetime.should_stop(): try: waiting = self.new_value.get(block=True, timeout=1) self.value = jax.device_put(jax.block_until_ready(waiting), self.device) @@ -154,16 +172,3 @@ def __enter__(self) -> None: def __exit__(self, *args: Any) -> None: end = time.monotonic() self.to.append(end - self.start) - - -class ThreadLifetime: - """Simple class for a mutable boolean that can be used to signal a thread to stop.""" - - def __init__(self) -> None: - self._stop = False - - def should_stop(self) -> bool: - return self._stop - - def stop(self) -> None: - self._stop = True From d926c54f4b043f19e616cf28cfa5d4e1e09456c5 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 29 Jul 2024 16:51:55 +0100 Subject: [PATCH 088/139] chore: code cleanup --- mava/configs/arch/sebulba.yaml | 6 +-- mava/configs/system/ppo/ff_ippo.yaml | 2 +- mava/evaluator.py | 6 +-- mava/systems/ppo/sebulba/ff_ippo.py | 76 +++++++++++++++++----------- mava/wrappers/gym.py | 7 ++- 5 files changed, 57 insertions(+), 40 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 342e0ee29..65be6e68a 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -2,15 +2,15 @@ architecture_name: sebulba # --- Training --- -num_envs: 32 # number of environments per thread. +num_envs: 2 # number of environments per thread. # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. -num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. +num_eval_episodes: 2 # Number of episodes to evaluate per evaluation. num_evaluation: 10 # Number of evenly spaced evaluations to perform during training. -num_absolute_metric_eval_episodes: 32 # Number of episodes to evaluate the absolute metric (the final evaluation). +num_absolute_metric_eval_episodes: 2 # Number of episodes to evaluate the absolute metric (the final evaluation). absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 diff --git a/mava/configs/system/ppo/ff_ippo.yaml b/mava/configs/system/ppo/ff_ippo.yaml index 9efb0611a..622d94ca2 100644 --- a/mava/configs/system/ppo/ff_ippo.yaml +++ b/mava/configs/system/ppo/ff_ippo.yaml @@ -2,7 +2,7 @@ total_timesteps: ~ # Set the total environment steps. # If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value. 
-num_updates: 1000 # Number of updates +num_updates: 200 # Number of updates seed: 42 # --- Agent observations --- diff --git a/mava/evaluator.py b/mava/evaluator.py index 83e8841c3..b16f43c75 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -284,8 +284,8 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: # find the first instance of done to get the metrics at that timestep, we don't # care about subsequent steps because we only the results from the first episode - done_idx = jnp.argmax(timesteps.last(), axis=0) - metrics = jax.tree_map(lambda m: m[done_idx, jnp.arange(n_parallel_envs)], metrics) + done_idx = np.argmax(timesteps.last(), axis=0) + metrics = jax.tree_map(lambda m: m[done_idx, np.arange(n_parallel_envs)], metrics) del metrics["is_terminal_step"] # uneeded for logging return key, metrics @@ -299,7 +299,7 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: metrics.append(metric) metrics: Metrics = jax.tree_map( - lambda *x: jnp.array(x).reshape(-1), *metrics + lambda *x: np.array(x).reshape(-1), *metrics ) # flatten metrics return metrics diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 2fd098a5d..00c699512 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -56,13 +56,28 @@ def rollout( key: chex.PRNGKey, config: DictConfig, - rollout_pipeline: Pipeline, + rollout_queue: Pipeline, params_source: ParamsSource, - apply_fns: Tuple, + apply_fns: Tuple[ActorApply, CriticApply], actor_device_id: int, seeds: List[int], thread_lifetime: ThreadLifetime, ) -> None: + """Runs rollouts to collect trajectories from the environment. + + Args: + key (chex.PRNGKey): The PRNGkey. + config (DictConfig): Configuration settings for the environment and rollout. + rollout_queue (Pipeline): Queue for sending collected rollouts. + params_source (ParamsSource): Source for fetching the latest network parameters. + apply_fns (Tuple): Functions for running the actor and critic networks. + actor_device_id (int): Actor device id for the current thread. + seeds (List[int]): Seeds for initializing the environment. + thread_lifetime (ThreadLifetime): Manages the thread's lifecycle. + + Returns: + None: This function updates the rollout queue with collected data. 
+ """ # setup env = environments.make_gym_env(config, config.arch.num_envs) current_actor_device = jax.devices()[actor_device_id] @@ -88,10 +103,7 @@ def get_action_and_value( timestep = env.reset(seed=seeds) - next_dones = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, num_agents).reshape(num_envs, -1), - timestep.last(), - ) + next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) move_to_device = lambda x: jax.device_put(x, device=current_actor_device) @@ -99,13 +111,20 @@ def get_action_and_value( while not thread_lifetime.should_stop(): # Rollout traj: List = [] - time_dict: Dict[str, List[float]] = {"single_rollout": [], "env_step_time": []} + time_dict: Dict[str, List[float]] = { + "single_rollout_time": [], + "env_step_time": [], + "getting_params_time": [], + "putting_rollout_time": [], + } # Loop over the rollout length - with RecordTimeTo(time_dict["single_rollout"]): + with RecordTimeTo(time_dict["single_rollout_time"]): for _ in range(config.system.rollout_length): # Get the latest parameters from the learner - params = params_source.get() + + with RecordTimeTo(time_dict["getting_params_time"]): + params = params_source.get() cached_next_obs = jax.tree.map(move_to_device, timestep.observation) cached_next_dones = move_to_device(next_dones) @@ -126,10 +145,7 @@ def get_action_and_value( cpu_action.swapaxes(0, 1) ) # (num_env, num_agents) --> (num_agents, num_env) - next_dones = jax.tree_util.tree_map( - lambda x: jnp.repeat(x, num_agents).reshape(num_envs, -1), - timestep.last(), - ) + next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) # Append data to storage traj.append( @@ -143,8 +159,9 @@ def get_action_and_value( info=timestep.extras, ) ) - - rollout_pipeline.put(traj, timestep.observation, next_dones, time_dict) + # send trajectories to learner + with RecordTimeTo(time_dict["putting_rollout_time"]): + rollout_queue.put(traj, timestep.observation, next_dones, time_dict) env.close() @@ -167,10 +184,9 @@ def _update_step( ) -> Tuple[LearnerState, Tuple]: """A single update of the network. - This function steps the environment and records the trajectory batch for - training. It then calculates advantages and targets based on the recorded - trajectory and updates the actor and critic networks based on the calculated - losses. + This function calculates advantages and targets based on the trajectories + from the actor and updates the actor and critic networks based on the + calculated losses. Args: learner_state (NamedTuple): @@ -295,12 +311,12 @@ def _critic_loss_fn( # pmean over devices. actor_grads, actor_loss_info = jax.lax.pmean( (actor_grads, actor_loss_info), - axis_name="device", + axis_name="learner_devices", ) - # pmean over devices. + # pmean over learner devices. critic_grads, critic_loss_info = jax.lax.pmean( - (critic_grads, critic_loss_info), axis_name="device" + (critic_grads, critic_loss_info), axis_name="learner_devices" ) # UPDATE ACTOR PARAMS AND OPTIMISER STATE @@ -460,7 +476,7 @@ def learner_setup( # Get batched iterated update and replicate it to pmap it over learner cores. learn = get_learner_fn(apply_fns, update_fns, config) - learn = jax.pmap(learn, axis_name="device", devices=learner_devices) + learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) # Load model from checkpoint if specified. 
if config.logger.checkpointing.load_model: @@ -523,10 +539,10 @@ def learner( episode_metrics, train_metrics = jax.tree.map(lambda *x: np.asarray(x), *metrics) rollout_times = jax.tree.map(lambda *x: np.mean(x), *rollout_times) - times_dict = rollout_times | eval_times - times_dict = jax.tree.map(np.mean, times_dict, is_leaf=lambda x: isinstance(x, list)) + timing_dict = rollout_times | eval_times + timing_dict = jax.tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) - learner_queue.put((episode_metrics, train_metrics, learner_state, times_dict)) + learner_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) def run_experiment(_config: DictConfig) -> float: @@ -646,17 +662,19 @@ def run_experiment(_config: DictConfig) -> float: max_episode_return = -jnp.inf best_params = unreplicated_inital_params.actor_params + # This is the main loop, all it does is evaluation and logging. + # Acting and learning is happening in their own threads. + # This loop waits for the learner to finish an update before evaluation and logging. for eval_step in range(config.arch.num_evaluation): - # Get the next set of params and metrics from the evaluator + # Get the next set of params and metrics from the learner episode_metrics, train_metrics, learner_state, times_dict = learner_queue.get() t = int(steps_per_rollout * (eval_step + 1)) - times_dict["timestep"] = t logger.log(times_dict, t, eval_step, LogEvent.MISC) episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / times_dict["single_rollout"] + episode_metrics["steps_per_second"] = steps_per_rollout / times_dict["single_rollout_time"] if ep_completed: logger.log(episode_metrics, t, eval_step, LogEvent.ACT) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 35bd674bd..6dcbf9963 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -196,7 +196,7 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: class GymToJumanji(gymnasium.Wrapper): - """Converts Gym outputs to Jumanji timesteps""" + """Converts from the Gym API to the dm_env API, Jumanji's Timestep type.""" def reset( self, seed: Optional[list[int]] = None, options: Optional[list[dict]] = None @@ -227,9 +227,8 @@ def _format_observation( ) -> Union[Observation, ObservationGlobalState]: """Create an observation from the raw observation and environment state.""" - obs = np.array(obs).swapaxes( - 0, 1 - ) # (num_agents, num_envs, ...) -> (num_envs, num_agents, ...) + # (num_agents, num_envs, ...) -> (num_envs, num_agents, ...) + obs = np.array(obs).swapaxes(0, 1) action_mask = np.stack(info["actions_mask"]) obs_data = {"agents_view": obs, "action_mask": action_mask} From 7e4698a1446bc55d1e5d7aa17ad294d8a2142865 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Mon, 29 Jul 2024 17:25:29 +0100 Subject: [PATCH 089/139] chore : various changes --- mava/configs/arch/sebulba.yaml | 6 +++--- mava/configs/system/ppo/ff_ippo.yaml | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 14 +++++++------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 65be6e68a..0c1c8880d 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -2,15 +2,15 @@ architecture_name: sebulba # --- Training --- -num_envs: 2 # number of environments per thread. +num_envs: 32 # number of environments per thread. # --- Evaluation --- evaluation_greedy: False # Evaluate the policy greedily. 
If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. -num_eval_episodes: 2 # Number of episodes to evaluate per evaluation. +num_eval_episodes: 200 # Number of episodes to evaluate per evaluation. num_evaluation: 10 # Number of evenly spaced evaluations to perform during training. -num_absolute_metric_eval_episodes: 2 # Number of episodes to evaluate the absolute metric (the final evaluation). +num_absolute_metric_eval_episodes: 32 # Number of episodes to evaluate the absolute metric (the final evaluation). absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 diff --git a/mava/configs/system/ppo/ff_ippo.yaml b/mava/configs/system/ppo/ff_ippo.yaml index 622d94ca2..9efb0611a 100644 --- a/mava/configs/system/ppo/ff_ippo.yaml +++ b/mava/configs/system/ppo/ff_ippo.yaml @@ -2,7 +2,7 @@ total_timesteps: ~ # Set the total environment steps. # If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value. -num_updates: 200 # Number of updates +num_updates: 1000 # Number of updates seed: 42 # --- Agent observations --- diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 00c699512..31c7e26af 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -27,6 +27,7 @@ import optax from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict +from jax import tree from omegaconf import DictConfig, OmegaConf from optax._src.base import OptState from rich.pretty import pprint @@ -126,7 +127,7 @@ def get_action_and_value( with RecordTimeTo(time_dict["getting_params_time"]): params = params_source.get() - cached_next_obs = jax.tree.map(move_to_device, timestep.observation) + cached_next_obs = tree.map(move_to_device, timestep.observation) cached_next_dones = move_to_device(next_dones) # Get action and value @@ -474,7 +475,6 @@ def learner_setup( apply_fns = (actor_network.apply, critic_network.apply) update_fns = (actor_optim.update, critic_optim.update) - # Get batched iterated update and replicate it to pmap it over learner cores. learn = get_learner_fn(apply_fns, update_fns, config) learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) @@ -536,11 +536,11 @@ def learner( source.update(unreplicated_params) # Pass to the evaluator - episode_metrics, train_metrics = jax.tree.map(lambda *x: np.asarray(x), *metrics) + episode_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) - rollout_times = jax.tree.map(lambda *x: np.mean(x), *rollout_times) + rollout_times = tree.map(lambda *x: np.mean(x), *rollout_times) timing_dict = rollout_times | eval_times - timing_dict = jax.tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) + timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) learner_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) @@ -553,8 +553,8 @@ def run_experiment(_config: DictConfig) -> float: learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] # PRNG keys. 
- key, key_e, actor_net_key, critic_net_key = jax.random.split( - jax.random.PRNGKey(config.system.seed), num=4 + key, actor_net_key, critic_net_key = jax.random.split( + jax.random.PRNGKey(config.system.seed), num=3 ) # Sanity check of config From 6dac8c3206b806db26d598158b8de1aad571c755 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 30 Jul 2024 12:26:49 +0100 Subject: [PATCH 090/139] fix: prevent the pipeline from stalling and a lot of cleanup --- mava/systems/ppo/sebulba/ff_ippo.py | 88 +++++++++++++---------------- mava/utils/sebulba_utils.py | 25 ++++++++ mava/wrappers/gym.py | 2 +- 3 files changed, 65 insertions(+), 50 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 31c7e26af..04aeda480 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -48,7 +48,13 @@ from mava.utils.checkpointing import Checkpointer from mava.utils.jax_utils import merge_leading_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.sebulba_utils import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime +from mava.utils.sebulba_utils import ( + ParamsSource, + Pipeline, + RecordTimeTo, + ThreadLifetime, + check_config, +) from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -69,15 +75,13 @@ def rollout( Args: key (chex.PRNGKey): The PRNGkey. config (DictConfig): Configuration settings for the environment and rollout. - rollout_queue (Pipeline): Queue for sending collected rollouts. - params_source (ParamsSource): Source for fetching the latest network parameters. + rollout_queue (Pipeline): Queue for sending collected rollouts to the learner. + params_source (ParamsSource): Source for fetching the latest network parameters + from the learner. apply_fns (Tuple): Functions for running the actor and critic networks. - actor_device_id (int): Actor device id for the current thread. + actor_device_id (int): Device ID for this actor thread. seeds (List[int]): Seeds for initializing the environment. thread_lifetime (ThreadLifetime): Manages the thread's lifecycle. - - Returns: - None: This function updates the rollout queue with collected data. 
""" # setup env = environments.make_gym_env(config, config.arch.num_envs) @@ -115,8 +119,8 @@ def get_action_and_value( time_dict: Dict[str, List[float]] = { "single_rollout_time": [], "env_step_time": [], - "getting_params_time": [], - "putting_rollout_time": [], + "get_params_time": [], + "put_rollout_time": [], } # Loop over the rollout length @@ -124,7 +128,7 @@ def get_action_and_value( for _ in range(config.system.rollout_length): # Get the latest parameters from the learner - with RecordTimeTo(time_dict["getting_params_time"]): + with RecordTimeTo(time_dict["get_params_time"]): params = params_source.get() cached_next_obs = tree.map(move_to_device, timestep.observation) @@ -142,9 +146,8 @@ def get_action_and_value( cpu_action = jax.device_get(action) with RecordTimeTo(time_dict["env_step_time"]): - timestep = env.step( - cpu_action.swapaxes(0, 1) - ) # (num_env, num_agents) --> (num_agents, num_env) + # (num_env, num_agents) --> (num_agents, num_env) + timestep = env.step(cpu_action.swapaxes(0, 1)) next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) @@ -161,7 +164,7 @@ def get_action_and_value( ) ) # send trajectories to learner - with RecordTimeTo(time_dict["putting_rollout_time"]): + with RecordTimeTo(time_dict["put_rollout_time"]): rollout_queue.put(traj, timestep.observation, next_dones, time_dict) env.close() @@ -190,12 +193,10 @@ def _update_step( calculated losses. Args: - learner_state (NamedTuple): - - params (Params): The current model parameters. - - opt_states (OptStates): The current optimizer states. - - key (PRNGKey): The random number generator state. - - env_state (State): The environment state. - - last_timestep (TimeStep): The last timestep in the current trajectory. + learner_state (LearnerState): contains all the items needed for learning. + traj_batch (PPOTransition): the batch of data to learn with. + last_obs (Observation): the final observations (for bootstrapping in GAE). + last_dones (Array): the final dones (for bootstrapping in GAE). _ (Any): The current metrics info. """ @@ -309,7 +310,7 @@ def _critic_loss_fn( # Compute the parallel mean (pmean) over the batch. # This calculation is inspired by the Anakin architecture demo notebook. # available at https://tinyurl.com/26tdzs5x - # pmean over devices. + # pmean over learner devices. 
actor_grads, actor_loss_info = jax.lax.pmean( (actor_grads, actor_loss_info), axis_name="learner_devices", @@ -509,20 +510,20 @@ def learner( learn: SebulbaLearnerFn[LearnerState, PPOTransition], learner_state: LearnerState, config: DictConfig, - learner_queue: Queue, + eval_queue: Queue, pipeline: Pipeline, params_sources: Sequence[ParamsSource], ) -> None: for _eval_step in range(config.arch.num_evaluation): metrics: List[Tuple[Dict, Dict]] = [] rollout_times: List[Dict] = [] - eval_times: Dict[str, List[float]] = {"evaluator_blocked_time": [], "evaluation_time": []} + eval_times: Dict[str, List[float]] = {"rollout_get_time": [], "learning_time": []} for _update in range(config.system.num_updates_per_eval): - with RecordTimeTo(eval_times["evaluator_blocked_time"]): + with RecordTimeTo(eval_times["rollout_get_time"]): traj_batch, last_obs, last_dones, rollout_time = pipeline.get(block=True) - with RecordTimeTo(eval_times["evaluation_time"]): + with RecordTimeTo(eval_times["learning_time"]): learner_state, episode_metrics, train_metrics = learn( learner_state, traj_batch, last_obs, last_dones ) @@ -542,7 +543,7 @@ def learner( timing_dict = rollout_times | eval_times timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) - learner_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) + eval_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) def run_experiment(_config: DictConfig) -> float: @@ -557,26 +558,14 @@ def run_experiment(_config: DictConfig) -> float: jax.random.PRNGKey(config.system.seed), num=3 ) - # Sanity check of config - assert ( - config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments must to be divisible by the number of learners." - - assert ( - int(config.arch.num_envs / len(config.arch.learner_device_ids)) - * config.arch.n_threads_per_executor - % config.system.num_minibatches - == 0 - ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches." + # Numpy RNG. + np_rng = np.random.default_rng(config.system.seed) # Setup learner. learn, apply_fns, learner_state = learner_setup( (key, actor_net_key, critic_net_key), config, learner_devices ) - # Generate Numpy RNG for reproducibility - np_rng = np.random.default_rng(config.system.seed) - # Setup evaluator. # One key per device for evaluation. eval_act_fn = make_ff_eval_act_fn(apply_fns[0], config) @@ -586,11 +575,7 @@ def run_experiment(_config: DictConfig) -> float: # Calculate total timesteps. config = check_total_timesteps(config) - assert ( - config.system.num_updates > config.arch.num_evaluation - ), "Number of updates per evaluation must be less than total number of updates." - # Calculate number of updates per evaluation. 
- config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + check_config(config) steps_per_rollout = ( config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval @@ -652,11 +637,11 @@ def run_experiment(_config: DictConfig) -> float: actor.start() actor_threads.append(actor) - learner_queue: Queue = Queue() + eval_queue: Queue = Queue() threading.Thread( target=learner, name="Learner", - args=(learn, learner_state, config, learner_queue, pipeline, params_sources), + args=(learn, learner_state, config, eval_queue, pipeline, params_sources), ).start() max_episode_return = -jnp.inf @@ -667,7 +652,7 @@ def run_experiment(_config: DictConfig) -> float: # This loop waits for the learner to finish an update before evaluation and logging. for eval_step in range(config.arch.num_evaluation): # Get the next set of params and metrics from the learner - episode_metrics, train_metrics, learner_state, times_dict = learner_queue.get() + episode_metrics, train_metrics, learner_state, times_dict = eval_queue.get() t = int(steps_per_rollout * (eval_step + 1)) times_dict["timestep"] = t @@ -702,12 +687,17 @@ def run_experiment(_config: DictConfig) -> float: evaluator_envs.close() eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) - # Make sure all of the actors are done befor closing the pipeline + # Make sure all of the Threads are closed. actors_lifetime.stop() for actor in actor_threads: actor.join() + pipeline_lifetime.stop() + pipeline.join() + params_sources_lifetime.stop() + for params_source in params_sources: + params_source.join() # Measure absolute metric. if config.arch.absolute_metric: diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index e1fd34f37..8e84b4267 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -21,6 +21,7 @@ import jax import jax.numpy as jnp from chex import Array +from omegaconf import DictConfig from mava.systems.ppo.types import Params, PPOTransition # todo: remove the ppo dependencies from mava.types import Observation, ObservationGlobalState @@ -103,6 +104,12 @@ def put( # [(num_envs / num_learner_devices, num_agents)] * num_learner_devices sharded_next_dones = self.shard_split_playload(next_dones, 0) + # If the queue gets full at any point we prioritize taking new episodes. + # This also prevents the pipeline from stalling if the learner thread terminates + # before the actors finish putting the episodes in it. + if self._queue.full(): + self._queue.get() + self._queue.put((sharded_traj, sharded_next_obs, sharded_next_dones, time_dict)) with end_condition: @@ -172,3 +179,21 @@ def __enter__(self) -> None: def __exit__(self, *args: Any) -> None: end = time.monotonic() self.to.append(end - self.start) + + +def check_config(config: DictConfig) -> None: + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." + config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + + assert ( + config.arch.num_envs % len(config.arch.learner_device_ids) == 0 + ), "The number of environments must be divisible by the number of learners." + + assert ( + int(config.arch.num_envs / len(config.arch.learner_device_ids)) + * config.arch.n_threads_per_executor + % config.system.num_minibatches + == 0 + ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches." 
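
The divisibility checks in check_config above mirror how each actor's rollout is later sharded across learner devices and cut into minibatches. The sketch below is illustrative only (plain Python, invented config values; envs_per_learner and batch_size are names introduced here, not identifiers from this repo), and it states the minibatch condition in terms of the per-learner batch the learner actually builds (rollout length times envs per learner), which is also how a later patch in this series restates the assert:

    # Illustrative numbers only; substitute real config values.
    num_envs = 32                 # config.arch.num_envs (per actor thread)
    learner_device_ids = [0, 1]   # config.arch.learner_device_ids
    rollout_length = 128          # config.system.rollout_length
    num_minibatches = 2           # config.system.num_minibatches

    # Each actor's environments are split evenly across the learner devices,
    # so the env count has to divide cleanly.
    assert num_envs % len(learner_device_ids) == 0
    envs_per_learner = num_envs // len(learner_device_ids)

    # On each learner device the (rollout_length, envs_per_learner) trajectory
    # is flattened into one batch, which in turn has to split into minibatches.
    batch_size = rollout_length * envs_per_learner
    assert batch_size % num_minibatches == 0
    print(envs_per_learner, batch_size, batch_size // num_minibatches)  # 16 2048 1024
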
diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 6dcbf9963..e14389b24 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -196,7 +196,7 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: class GymToJumanji(gymnasium.Wrapper): - """Converts from the Gym API to the dm_env API, Jumanji's Timestep type.""" + """Converts from the Gym API to the dm_env API, using Jumanji's Timestep type.""" def reset( self, seed: Optional[list[int]] = None, options: Optional[list[dict]] = None From 23b582c6359d995f18f41b1c590e1146efc14c49 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 30 Jul 2024 12:44:26 +0100 Subject: [PATCH 091/139] chore : better error messeages --- mava/utils/sebulba_utils.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index 8e84b4267..9077925a8 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -187,13 +187,16 @@ def check_config(config: DictConfig) -> None: ), "Number of updates per evaluation must be less than total number of updates." config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation - assert ( - config.arch.num_envs % len(config.arch.learner_device_ids) == 0 - ), "The number of environments must be divisible by the number of learners." + assert config.arch.num_envs % len(config.arch.learner_device_ids) == 0, ( + "Number of environments must be divisible by the number of learner." + + "The output of each actor is equally split across the learners." + ) - assert ( + num_eval_samples = ( int(config.arch.num_envs / len(config.arch.learner_device_ids)) - * config.arch.n_threads_per_executor - % config.system.num_minibatches - == 0 - ), "int(local_num_envs / len(learner_device_ids)) must be divisible by num_minibatches." + * config.system.rollout_length + ) + assert num_eval_samples % config.system.num_minibatches == 0, ( + f"Number of training samples per evaluator ({num_eval_samples})" + + f"must be divisible by num_minibatches ({config.system.num_minibatches})." 
+ ) From c71dad86a0fd3d6c13f9ce2bdc173c73d88939fd Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Tue, 30 Jul 2024 13:23:44 +0100 Subject: [PATCH 092/139] fix: changed the timestep discount --- mava/wrappers/gym.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index e14389b24..ee4339afd 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -208,8 +208,9 @@ def reset( ep_done = np.zeros(num_envs, dtype=float) rewards = np.zeros((num_envs, num_agents), dtype=float) + teminated = np.zeros((num_envs, num_agents), dtype=float) - timestep = self._create_timestep(obs, ep_done, rewards, info) + timestep = self._create_timestep(obs, ep_done, teminated, rewards, info) return timestep @@ -218,7 +219,7 @@ def step(self, action: list) -> TimeStep: ep_done = np.logical_or(terminated, truncated).all(axis=1) - timestep = self._create_timestep(obs, ep_done, rewards, info) + timestep = self._create_timestep(obs, ep_done, terminated, rewards, info) return timestep @@ -240,16 +241,17 @@ def _format_observation( return Observation(**obs_data) def _create_timestep( - self, obs: NDArray, ep_done: NDArray, rewards: NDArray, info: Dict + self, obs: NDArray, ep_done: NDArray, terminated: NDArray, rewards: NDArray, info: Dict ) -> TimeStep: obs = self._format_observation(obs, info) extras = jax.tree.map(lambda *x: np.stack(x), *info["metrics"]) step_type = np.where(ep_done, StepType.LAST, StepType.MID) + terminated = np.all(terminated, axis=1) return TimeStep( step_type=step_type, reward=rewards, - discount=1.0 - ep_done, + discount=1.0 - terminated, observation=obs, extras=extras, ) From bfea3aab662646a0a1dd71aaf4d433fefe5c2116 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Tue, 30 Jul 2024 16:03:03 +0200 Subject: [PATCH 093/139] chore: very nitpicky clean ups --- mava/systems/ppo/sebulba/ff_ippo.py | 171 +++++++++++----------------- 1 file changed, 67 insertions(+), 104 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 04aeda480..38cb2905b 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -27,9 +27,9 @@ import optax from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate from jax import tree from omegaconf import DictConfig, OmegaConf -from optax._src.base import OptState from rich.pretty import pprint from mava.evaluator import get_sebulba_eval_fn as get_eval_fn @@ -85,9 +85,10 @@ def rollout( """ # setup env = environments.make_gym_env(config, config.arch.num_envs) - current_actor_device = jax.devices()[actor_device_id] actor_apply_fn, critic_apply_fn = apply_fns num_agents, num_envs = config.system.num_agents, config.arch.num_envs + current_actor_device = jax.devices()[actor_device_id] + move_to_device = lambda x: jax.device_put(x, device=current_actor_device) # Define the util functions: select action function and prepare data to share it with learner. @jax.jit @@ -107,40 +108,31 @@ def get_action_and_value( return action, log_prob, value, key timestep = env.reset(seed=seeds) - next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) - move_to_device = lambda x: jax.device_put(x, device=current_actor_device) - # Loop till the desired num_updates is reached. 
while not thread_lifetime.should_stop(): # Rollout - traj: List = [] + traj: List[PPOTransition] = [] time_dict: Dict[str, List[float]] = { "single_rollout_time": [], "env_step_time": [], "get_params_time": [], - "put_rollout_time": [], + "rollout_put_time": [], } # Loop over the rollout length with RecordTimeTo(time_dict["single_rollout_time"]): for _ in range(config.system.rollout_length): - # Get the latest parameters from the learner - with RecordTimeTo(time_dict["get_params_time"]): + # Get the latest parameters from the learner params = params_source.get() cached_next_obs = tree.map(move_to_device, timestep.observation) cached_next_dones = move_to_device(next_dones) # Get action and value - ( - action, - log_prob, - value, - key, - ) = get_action_and_value(params, cached_next_obs, key) + action, log_prob, value, key = get_action_and_value(params, cached_next_obs, key) # Step the environment cpu_action = jax.device_get(action) @@ -152,19 +144,15 @@ def get_action_and_value( next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) # Append data to storage + reward = timestep.reward + info = timestep.extras traj.append( PPOTransition( - done=cached_next_dones, - action=action, - value=value, - reward=timestep.reward, - log_prob=log_prob, - obs=cached_next_obs, - info=timestep.extras, + cached_next_dones, action, value, reward, log_prob, cached_next_obs, info ) ) # send trajectories to learner - with RecordTimeTo(time_dict["put_rollout_time"]): + with RecordTimeTo(time_dict["rollout_put_time"]): rollout_queue.put(traj, timestep.observation, next_dones, time_dict) env.close() @@ -189,8 +177,7 @@ def _update_step( """A single update of the network. This function calculates advantages and targets based on the trajectories - from the actor and updates the actor and critic networks based on the - calculated losses. + from the actor and updates the actor and critic networks based on the losses. Args: learner_state (LearnerState): contains all the items needed for learning. 
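
The shape bookkeeping in the rollout loop of this patch (broadcasting the per-env done flag to a per-agent array, and transposing actions before stepping the vectorised env) is easy to get wrong. A toy, self-contained check of the same operations, with numpy stand-ins and invented sizes rather than the real config:

    import numpy as np

    num_envs, num_agents = 4, 3

    # timestep.last() yields one flag per environment ...
    env_done = np.array([True, False, False, True])

    # ... which is repeated per agent to match the (num_envs, num_agents)
    # layout of the actions, values and rewards stored in PPOTransition.
    per_agent_done = np.repeat(env_done, num_agents).reshape(num_envs, -1)
    assert per_agent_done.shape == (num_envs, num_agents)
    assert per_agent_done[0].all() and not per_agent_done[1].any()

    # The networks emit actions as (num_envs, num_agents); the vectorised gym
    # env expects them agent-major, hence the swapaxes before env.step.
    actions = np.arange(num_envs * num_agents).reshape(num_envs, num_agents)
    assert actions.swapaxes(0, 1).shape == (num_agents, num_envs)
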
@@ -222,7 +209,7 @@ def _get_advantages( ) return advantages, advantages + traj_batch.value - # CALCULATE ADVANTAGE + # Calculate advantage params, opt_states, key, _, _ = learner_state last_val = critic_apply_fn(params.critic_params, last_obs) advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) @@ -233,23 +220,22 @@ def _update_epoch(update_state: Tuple, _: Any) -> Tuple: def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: """Update the network for a single minibatch.""" - # UNPACK TRAIN STATE AND BATCH INFO + # Unpack train state and batch info params, opt_states, key = train_state traj_batch, advantages, targets = batch_info def _actor_loss_fn( actor_params: FrozenDict, - actor_opt_state: OptState, traj_batch: PPOTransition, gae: chex.Array, key: chex.PRNGKey, ) -> Tuple: """Calculate the actor loss.""" - # RERUN NETWORK + # Rerun network actor_policy = actor_apply_fn(actor_params, traj_batch.obs) log_prob = actor_policy.log_prob(traj_batch.action) - # CALCULATE ACTOR LOSS + # Calculate actor loss ratio = jnp.exp(log_prob - traj_batch.log_prob) gae = (gae - gae.mean()) / (gae.std() + 1e-8) loss_actor1 = ratio * gae @@ -270,16 +256,13 @@ def _actor_loss_fn( return total_loss_actor, (loss_actor, entropy) def _critic_loss_fn( - critic_params: FrozenDict, - critic_opt_state: OptState, - traj_batch: PPOTransition, - targets: chex.Array, + critic_params: FrozenDict, traj_batch: PPOTransition, targets: chex.Array ) -> Tuple: """Calculate the critic loss.""" - # RERUN NETWORK + # Rerun network value = critic_apply_fn(critic_params, traj_batch.obs) - # CALCULATE VALUE LOSS + # Calculate value loss value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( -config.system.clip_eps, config.system.clip_eps ) @@ -290,21 +273,17 @@ def _critic_loss_fn( critic_total_loss = config.system.vf_coef * value_loss return critic_total_loss, (value_loss) - # CALCULATE ACTOR LOSS + # Calculate actor loss key, entropy_key = jax.random.split(key) actor_grad_fn = jax.value_and_grad(_actor_loss_fn, has_aux=True) actor_loss_info, actor_grads = actor_grad_fn( - params.actor_params, - opt_states.actor_opt_state, - traj_batch, - advantages, - entropy_key, + params.actor_params, traj_batch, advantages, entropy_key ) - # CALCULATE CRITIC LOSS + # Calculate critic loss critic_grad_fn = jax.value_and_grad(_critic_loss_fn, has_aux=True) critic_loss_info, critic_grads = critic_grad_fn( - params.critic_params, opt_states.critic_opt_state, traj_batch, targets + params.critic_params, traj_batch, targets ) # Compute the parallel mean (pmean) over the batch. 
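
For reference, the _calculate_gae scan shown earlier in this patch implements the usual generalised advantage estimation recurrence. Below is a standalone plain-numpy version with toy numbers; it uses the textbook indexing and is a sketch, not a line-for-line copy of the scan above (which bootstraps from last_val and threads last_dones through the reversed scan):

    import numpy as np

    def reference_gae(rewards, values, dones, last_value, gamma=0.99, lam=0.95):
        """A_t = delta_t + gamma * lam * (1 - done_t) * A_{t+1}."""
        advantages = np.zeros_like(rewards)
        next_adv, next_value = 0.0, last_value
        for t in reversed(range(len(rewards))):
            not_done = 1.0 - dones[t]
            delta = rewards[t] + gamma * next_value * not_done - values[t]
            next_adv = delta + gamma * lam * not_done * next_adv
            advantages[t] = next_adv
            next_value = values[t]
        # As in the code above, the critic targets are advantages + values.
        return advantages, advantages + values

    adv, targets = reference_gae(
        rewards=np.array([1.0, 0.0, 1.0]),
        values=np.array([0.5, 0.4, 0.6]),
        dones=np.array([0.0, 0.0, 1.0]),
        last_value=0.7,
    )
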
@@ -321,22 +300,22 @@ def _critic_loss_fn( (critic_grads, critic_loss_info), axis_name="learner_devices" ) - # UPDATE ACTOR PARAMS AND OPTIMISER STATE + # Update actor params and optimiser state actor_updates, actor_new_opt_state = actor_update_fn( actor_grads, opt_states.actor_opt_state ) actor_new_params = optax.apply_updates(params.actor_params, actor_updates) - # UPDATE CRITIC PARAMS AND OPTIMISER STATE + # Update critic params and optimiser state critic_updates, critic_new_opt_state = critic_update_fn( critic_grads, opt_states.critic_opt_state ) critic_new_params = optax.apply_updates(params.critic_params, critic_updates) - # PACK NEW PARAMS AND OPTIMISER STATE + # Pack new params and optimiser state new_params = Params(actor_new_params, critic_new_params) new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) - # PACK LOSS INFO + # Pack loss info total_loss = actor_loss_info[0] + critic_loss_info[0] value_loss = critic_loss_info[1] actor_loss = actor_loss_info[1][0] @@ -351,21 +330,19 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) - # SHUFFLE MINIBATCHES + # Shuffle minibatches batch_size = config.system.rollout_length * ( config.arch.num_envs // len(config.arch.learner_device_ids) ) permutation = jax.random.permutation(shuffle_key, batch_size) batch = (traj_batch, advantages, targets) - batch = jax.tree_util.tree_map(lambda x: merge_leading_dims(x, 2), batch) - shuffled_batch = jax.tree_util.tree_map( - lambda x: jnp.take(x, permutation, axis=0), batch - ) - minibatches = jax.tree_util.tree_map( + batch = tree.map(lambda x: merge_leading_dims(x, 2), batch) + shuffled_batch = tree.map(lambda x: jnp.take(x, permutation, axis=0), batch) + minibatches = tree.map( lambda x: jnp.reshape(x, (config.system.num_minibatches, -1, *x.shape[1:])), shuffled_batch, ) - # UPDATE MINIBATCHES + # Update minibatches (params, opt_states, entropy_key), loss_info = jax.lax.scan( _update_minibatch, (params, opt_states, entropy_key), minibatches ) @@ -374,7 +351,7 @@ def _critic_loss_fn( return update_state, loss_info update_state = (params, opt_states, traj_batch, advantages, targets, key) - # UPDATE EPOCHS + # Update epochs update_state, loss_info = jax.lax.scan( _update_epoch, update_state, None, config.system.ppo_epochs ) @@ -418,7 +395,7 @@ def learner_fn( def learner_setup( - keys: chex.Array, config: DictConfig, learner_devices: List + key: chex.PRNGKey, config: DictConfig, learner_devices: List ) -> Tuple[ SebulbaLearnerFn[LearnerState, PPOTransition], Tuple[ActorApply, CriticApply], LearnerState ]: @@ -432,7 +409,7 @@ def learner_setup( config.system.num_actions = int(action_space[0].n) # PRNG keys. - key, actor_net_key, critic_net_key = keys + key, actor_key, critic_key = jax.random.split(key, 3) # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) @@ -462,11 +439,11 @@ def learner_setup( init_x = Observation(init_obs, init_action_mask) # Initialise actor params and optimiser state. - actor_params = actor_network.init(actor_net_key, init_x) + actor_params = actor_network.init(actor_key, init_x) actor_opt_state = actor_optim.init(actor_params) # Initialise critic params and optimiser state. - critic_params = critic_network.init(critic_net_key, init_x) + critic_params = critic_network.init(critic_key, init_x) critic_opt_state = critic_optim.init(critic_params) # Pack params. 
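
The minibatch preparation in _update_epoch earlier in this patch (merge_leading_dims, the random permutation, then the reshape) amounts to folding the time and environment axes into one batch axis, shuffling whole samples and slicing the result into num_minibatches equal chunks. A small numpy illustration with invented shapes, acting on a single leaf of the pytree:

    import numpy as np

    rollout_len, envs_per_learner, num_agents = 8, 4, 3
    num_minibatches = 2
    rng = np.random.default_rng(0)

    # One leaf of the (traj_batch, advantages, targets) pytree.
    leaf = rng.normal(size=(rollout_len, envs_per_learner, num_agents))

    # merge_leading_dims(x, 2): fold (time, env) into a single batch axis.
    batch = leaf.reshape(rollout_len * envs_per_learner, num_agents)

    # Shuffle whole samples, then cut into equally sized minibatches.
    shuffled = batch[rng.permutation(batch.shape[0])]
    minibatches = shuffled.reshape(num_minibatches, -1, num_agents)
    assert minibatches.shape == (num_minibatches, 16, num_agents)
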
@@ -517,13 +494,13 @@ def learner( for _eval_step in range(config.arch.num_evaluation): metrics: List[Tuple[Dict, Dict]] = [] rollout_times: List[Dict] = [] - eval_times: Dict[str, List[float]] = {"rollout_get_time": [], "learning_time": []} + learn_times: Dict[str, List[float]] = {"rollout_get_time": [], "learning_time": []} for _update in range(config.system.num_updates_per_eval): - with RecordTimeTo(eval_times["rollout_get_time"]): + with RecordTimeTo(learn_times["rollout_get_time"]): traj_batch, last_obs, last_dones, rollout_time = pipeline.get(block=True) - with RecordTimeTo(eval_times["learning_time"]): + with RecordTimeTo(learn_times["learning_time"]): learner_state, episode_metrics, train_metrics = learn( learner_state, traj_batch, last_obs, last_dones ) @@ -531,7 +508,7 @@ def learner( metrics.append((episode_metrics, train_metrics)) rollout_times.append(rollout_time) - unreplicated_params = flax.jax_utils.unreplicate(learner_state.params) + unreplicated_params = unreplicate(learner_state.params) for source in params_sources: source.update(unreplicated_params) @@ -540,7 +517,7 @@ def learner( episode_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) rollout_times = tree.map(lambda *x: np.mean(x), *rollout_times) - timing_dict = rollout_times | eval_times + timing_dict = rollout_times | learn_times timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) eval_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) @@ -553,18 +530,12 @@ def run_experiment(_config: DictConfig) -> float: devices = jax.devices() learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] - # PRNG keys. - key, actor_net_key, critic_net_key = jax.random.split( - jax.random.PRNGKey(config.system.seed), num=3 - ) - - # Numpy RNG. + # JAX and numpy RNGs + key = jax.random.PRNGKey(config.system.seed) np_rng = np.random.default_rng(config.system.seed) # Setup learner. - learn, apply_fns, learner_state = learner_setup( - (key, actor_net_key, critic_net_key), config, learner_devices - ) + learn, apply_fns, learner_state = learner_setup(key, config, learner_devices) # Setup evaluator. # One key per device for evaluation. @@ -583,9 +554,9 @@ def run_experiment(_config: DictConfig) -> float: # Logger setup logger = MavaLogger(config) - cfg: Dict = OmegaConf.to_container(config, resolve=True) - cfg["arch"]["devices"] = jax.devices() - pprint(cfg) + print_cfg: Dict = OmegaConf.to_container(config, resolve=True) + print_cfg["arch"]["devices"] = jax.devices() + pprint(print_cfg) # Set up checkpointer save_checkpoint = config.logger.checkpointing.save_model @@ -597,13 +568,14 @@ def run_experiment(_config: DictConfig) -> float: ) # Executor setup and launch. 
- unreplicated_inital_params = flax.jax_utils.unreplicate(learner_state.params) + inital_params = unreplicate(learner_state.params) - pipeline_lifetime = ThreadLifetime() - pipeline = Pipeline(config.arch.rollout_queue_size, learner_devices, pipeline_lifetime) - pipeline.start() + # the rollout queue/ the pipe between actor and learner + pipe_lifetime = ThreadLifetime() + pipe = Pipeline(config.arch.rollout_queue_size, learner_devices, pipe_lifetime) + pipe.start() - params_sources: List[ParamsSource] = [] + param_sources: List[ParamsSource] = [] actor_threads: List[threading.Thread] = [] actors_lifetime = ThreadLifetime() params_sources_lifetime = ThreadLifetime() @@ -613,25 +585,16 @@ def run_experiment(_config: DictConfig) -> float: # Loop through each executor thread for thread_id in range(config.arch.n_threads_per_executor): seeds = np_rng.integers(np.iinfo(np.int32).max, size=config.arch.num_envs).tolist() + key, act_key = jax.random.split(key) + act_key = jax.device_put(key, devices[d_id]) - params_source = ParamsSource( - unreplicated_inital_params, devices[d_id], params_sources_lifetime - ) - params_source.start() - params_sources.append(params_source) + param_source = ParamsSource(inital_params, devices[d_id], params_sources_lifetime) + param_source.start() + param_sources.append(param_source) actor = threading.Thread( target=rollout, - args=( - jax.device_put(key, devices[d_id]), - config, - pipeline, - params_sources[-1], - apply_fns, - d_id, - seeds, - actors_lifetime, - ), + args=(act_key, config, pipe, param_source, apply_fns, d_id, seeds, actors_lifetime), name=f"Actor-{thread_id + d_idx * config.arch.n_threads_per_executor}", ) actor.start() @@ -641,11 +604,11 @@ def run_experiment(_config: DictConfig) -> float: threading.Thread( target=learner, name="Learner", - args=(learn, learner_state, config, eval_queue, pipeline, params_sources), + args=(learn, learner_state, config, eval_queue, pipe, param_sources), ).start() max_episode_return = -jnp.inf - best_params = unreplicated_inital_params.actor_params + best_params = inital_params.actor_params # This is the main loop, all it does is evaluation and logging. # Acting and learning is happening in their own threads. @@ -665,7 +628,7 @@ def run_experiment(_config: DictConfig) -> float: logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - unreplicated_actor_params = flax.jax_utils.unreplicate(learner_state.params.actor_params) + unreplicated_actor_params = unreplicate(learner_state.params.actor_params) key, eval_key = jax.random.split(key, 2) eval_metrics = evaluator(unreplicated_actor_params, eval_key, {}) logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) @@ -692,12 +655,12 @@ def run_experiment(_config: DictConfig) -> float: for actor in actor_threads: actor.join() - pipeline_lifetime.stop() - pipeline.join() + pipe_lifetime.stop() + pipe.join() params_sources_lifetime.stop() - for params_source in params_sources: - params_source.join() + for param_source in param_sources: + param_source.join() # Measure absolute metric. 
if config.arch.absolute_metric: From de92f5a9e6f41825fabf8c0935215d5ae9f857bc Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Tue, 30 Jul 2024 16:30:55 +0200 Subject: [PATCH 094/139] feat: pass timestep instead of obs and done and fix potential race condition in pipeline --- mava/systems/ppo/sebulba/ff_ippo.py | 32 +++++++------------ mava/types.py | 4 +-- mava/utils/sebulba_utils.py | 49 ++++++++++------------------- 3 files changed, 30 insertions(+), 55 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 38cb2905b..f4905c1c6 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -153,7 +153,7 @@ def get_action_and_value( ) # send trajectories to learner with RecordTimeTo(time_dict["rollout_put_time"]): - rollout_queue.put(traj, timestep.observation, next_dones, time_dict) + rollout_queue.put(traj, timestep, time_dict) env.close() @@ -164,6 +164,8 @@ def get_learner_fn( ) -> SebulbaLearnerFn[LearnerState, PPOTransition]: """Get the learner function.""" + num_agents, num_envs = config.system.num_agents, config.arch.num_envs + # Get apply and update functions for actor and critic networks. actor_apply_fn, critic_apply_fn = apply_fns actor_update_fn, critic_update_fn = update_fns @@ -171,8 +173,6 @@ def get_learner_fn( def _update_step( learner_state: LearnerState, traj_batch: PPOTransition, - last_obs: Observation, - last_dones: chex.Array, ) -> Tuple[LearnerState, Tuple]: """A single update of the network. @@ -182,9 +182,6 @@ def _update_step( Args: learner_state (LearnerState): contains all the items needed for learning. traj_batch (PPOTransition): the batch of data to learn with. - last_obs (Observation): the final observations (for bootstrapping in GAE). - last_dones (Array): the final dones (for bootstrapping in GAE). - _ (Any): The current metrics info. """ def _calculate_gae( @@ -210,8 +207,9 @@ def _get_advantages( return advantages, advantages + traj_batch.value # Calculate advantage + last_dones = jnp.repeat(learner_state.timestep.last(), num_agents).reshape(num_envs, -1) params, opt_states, key, _, _ = learner_state - last_val = critic_apply_fn(params.critic_params, last_obs) + last_val = critic_apply_fn(params.critic_params, learner_state.timestep.observation) advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) def _update_epoch(update_state: Tuple, _: Any) -> Tuple: @@ -357,15 +355,12 @@ def _critic_loss_fn( ) params, opt_states, traj_batch, advantages, targets, key = update_state - learner_state = LearnerState(params, opt_states, key, None, None) + learner_state = LearnerState(params, opt_states, key, None, learner_state.timestep) metric = traj_batch.info return learner_state, (metric, loss_info) def learner_fn( - learner_state: LearnerState, - traj_batch: PPOTransition, - last_obs: Observation, - last_dones: chex.Array, + learner_state: LearnerState, traj_batch: PPOTransition ) -> ExperimentOutput[LearnerState]: """Learner function. @@ -379,11 +374,9 @@ def learner_fn( - opt_states (OptStates): The initial optimizer state. - key (chex.PRNGKey): The random number generator state. - env_state (LogEnvState): The environment state. - - timesteps (TimeStep): The initial timestep in the initial trajectory. + - timesteps (TimeStep): The last timestep of the rollout. 
""" - learner_state, (episode_info, loss_info) = _update_step( - learner_state, traj_batch, last_obs, last_dones - ) + learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch) return ExperimentOutput( learner_state=learner_state, @@ -498,12 +491,11 @@ def learner( for _update in range(config.system.num_updates_per_eval): with RecordTimeTo(learn_times["rollout_get_time"]): - traj_batch, last_obs, last_dones, rollout_time = pipeline.get(block=True) + traj_batch, timestep, rollout_time = pipeline.get(block=True) + learner_state = learner_state._replace(timestep=timestep) with RecordTimeTo(learn_times["learning_time"]): - learner_state, episode_metrics, train_metrics = learn( - learner_state, traj_batch, last_obs, last_dones - ) + learner_state, episode_metrics, train_metrics = learn(learner_state, traj_batch) metrics.append((episode_metrics, train_metrics)) rollout_times.append(rollout_time) diff --git a/mava/types.py b/mava/types.py index fe51ce293..8a191f5ab 100644 --- a/mava/types.py +++ b/mava/types.py @@ -153,9 +153,7 @@ class ExperimentOutput(NamedTuple, Generic[MavaState]): LearnerFn = Callable[[MavaState], ExperimentOutput[MavaState]] -SebulbaLearnerFn = Callable[ - [MavaState, MavaTransition, chex.Array, chex.Array], ExperimentOutput[MavaState] -] +SebulbaLearnerFn = Callable[[MavaState, MavaTransition], ExperimentOutput[MavaState]] ActorApply = Callable[[FrozenDict, Observation], Distribution] CriticApply = Callable[[FrozenDict, Observation], Value] RecActorApply = Callable[ diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index 9077925a8..b15edeba6 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -20,11 +20,10 @@ import jax import jax.numpy as jnp -from chex import Array +from jumanji.types import TimeStep from omegaconf import DictConfig from mava.systems.ppo.types import Params, PPOTransition # todo: remove the ppo dependencies -from mava.types import Observation, ObservationGlobalState # Copied from https://github.com/instadeepai/sebulba/blob/main/sebulba/core.py @@ -63,8 +62,7 @@ def __init__(self, max_size: int, learner_devices: List[jax.Device], lifetime: T self.lifetime = lifetime def run(self) -> None: - """ - This function ensures that trajectories on the queue are consumed in the right order. The + """This function ensures that trajectories on the queue are consumed in the right order. The start_condition and end_condition are used to ensure that only 1 thread is processing an item from the queue at one time, ensuring predictable memory usage. """ @@ -78,16 +76,8 @@ def run(self) -> None: except queue.Empty: continue - def put( - self, - traj: Sequence[PPOTransition], - next_obs: Union[Observation, ObservationGlobalState], - next_dones: Array, - time_dict: Dict, - ) -> None: - """ - Put a trajectory on the queue to be consumed by the learner. - """ + def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict) -> None: + """Put a trajectory on the queue to be consumed by the learner.""" start_condition, end_condition = (threading.Condition(), threading.Condition()) with start_condition: self.tickets_queue.put((start_condition, end_condition)) @@ -96,21 +86,18 @@ def put( # [PPOTransition()] * rollout_len --> PPOTransition[done=(rollout_len, num_envs, num_agents) sharded_traj = jax.tree.map(lambda *x: self.shard_split_playload(jnp.stack(x), 1), *traj) - # obs Tuple[(num_envs, num_agents, ...), ...] --> + # Timestep[(num_envs, num_agents, ...), ...] 
--> # [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices - sharded_next_obs = jax.tree.map(self.shard_split_playload, next_obs) - - # dones (num_envs, num_agents) --> - # [(num_envs / num_learner_devices, num_agents)] * num_learner_devices - sharded_next_dones = self.shard_split_playload(next_dones, 0) + sharded_timestep = jax.tree.map(self.shard_split_playload, timestep) - # If the queue gets full at any point we prioritize taking new episodes. + # If the queue gets full at any point we prioritize taking removing the oldest rollouts. # This also prevents the pipeline from stalling if the learner thread terminates - # before the actors finish putting the episodes in it. - if self._queue.full(): - self._queue.get() + # with a full queue blocking the actors from placing items in it. + with self._queue.mutex: + if self._queue.maxsize >= self._queue._qsize(): # queue is full + self._queue.get() # throw away the transition - self._queue.put((sharded_traj, sharded_next_obs, sharded_next_dones, time_dict)) + self._queue.put((sharded_traj, sharded_timestep, time_dict)) with end_condition: end_condition.notify() # tell we have finish @@ -121,7 +108,7 @@ def qsize(self) -> int: def get( self, block: bool = True, timeout: Union[float, None] = None - ) -> Tuple[PPOTransition, Union[Observation, ObservationGlobalState], Array, Dict]: + ) -> Tuple[PPOTransition, TimeStep, Dict]: """Get a trajectory from the pipeline.""" return self._queue.get(block, timeout) # type: ignore @@ -131,8 +118,7 @@ def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: class ParamsSource(threading.Thread): - """ - A `ParamSource` is a component that allows networks params to be passed from a + """A `ParamSource` is a component that allows networks params to be passed from a `Learner` component to `Actor` components. """ @@ -144,8 +130,7 @@ def __init__(self, init_value: Params, device: jax.Device, lifetime: ThreadLifet self.lifetime = lifetime def run(self) -> None: - """ - This function is responsible for updating the value of the `ParamSource` when a new value + """This function is responsible for updating the value of the `ParamSource` when a new value is available. """ while not self.lifetime.should_stop(): @@ -156,8 +141,7 @@ def run(self) -> None: continue def update(self, new_params: Params) -> None: - """ - Update the value of the `ParamSource` with a new value. + """Update the value of the `ParamSource` with a new value. Args: new_params: The new value to update the `ParamSource` with. @@ -182,6 +166,7 @@ def __exit__(self, *args: Any) -> None: def check_config(config: DictConfig) -> None: + """Checks that the given config does not have conflicting values.""" assert ( config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." From 1465133381431d5ead3d9f1189c0d434254ca7d1 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Tue, 30 Jul 2024 16:35:24 +0200 Subject: [PATCH 095/139] fix: deadlock in pipeline --- mava/utils/sebulba_utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index b15edeba6..a25d1c117 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -93,9 +93,8 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict # If the queue gets full at any point we prioritize taking removing the oldest rollouts. 
# This also prevents the pipeline from stalling if the learner thread terminates # with a full queue blocking the actors from placing items in it. - with self._queue.mutex: - if self._queue.maxsize >= self._queue._qsize(): # queue is full - self._queue.get() # throw away the transition + if self._queue.full(): + self._queue.get() # throw away the transition self._queue.put((sharded_traj, sharded_timestep, time_dict)) From 6689c4951157909780b63a17889f51cdc0256ee0 Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sun, 11 Aug 2024 14:16:55 +0100 Subject: [PATCH 096/139] fix: wasting samples --- mava/systems/ppo/sebulba/ff_ippo.py | 12 +++++++++++- mava/utils/sebulba_utils.py | 18 +++++++----------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index f4905c1c6..f05d3cbdc 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -13,7 +13,9 @@ # limitations under the License. import copy +import queue import threading +import warnings from queue import Queue from typing import Any, Dict, List, Sequence, Tuple @@ -153,7 +155,15 @@ def get_action_and_value( ) # send trajectories to learner with RecordTimeTo(time_dict["rollout_put_time"]): - rollout_queue.put(traj, timestep, time_dict) + try: + rollout_queue.put(traj, timestep, time_dict) + except queue.Full: + warnings.warn( + "Waited too long to add to the rollout queue, killing the actor thread", + stacklevel=2, + ) + break + env.close() diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba_utils.py index a25d1c117..041843d95 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba_utils.py @@ -83,23 +83,19 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict self.tickets_queue.put((start_condition, end_condition)) start_condition.wait() # wait to be allowed to start - # [PPOTransition()] * rollout_len --> PPOTransition[done=(rollout_len, num_envs, num_agents) + # [PPOTransition()] * rollout_len --> PPOTransition[done=(rollout_len, num_envs, ...)] sharded_traj = jax.tree.map(lambda *x: self.shard_split_playload(jnp.stack(x), 1), *traj) # Timestep[(num_envs, num_agents, ...), ...] --> # [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices sharded_timestep = jax.tree.map(self.shard_split_playload, timestep) - # If the queue gets full at any point we prioritize taking removing the oldest rollouts. - # This also prevents the pipeline from stalling if the learner thread terminates - # with a full queue blocking the actors from placing items in it. - if self._queue.full(): - self._queue.get() # throw away the transition - - self._queue.put((sharded_traj, sharded_timestep, time_dict)) - - with end_condition: - end_condition.notify() # tell we have finish + # The lock has to be released even if an exception is raised. 
+ try: + self._queue.put((sharded_traj, sharded_timestep, time_dict), timeout=90) + finally: + with end_condition: + end_condition.notify() # tell we have finish def qsize(self) -> int: """Returns the number of trajectories in the pipeline.""" From c506da30201201599c5ee00fa4c04ef5e73157ba Mon Sep 17 00:00:00 2001 From: Louay-Ben-nessir Date: Sun, 11 Aug 2024 14:43:21 +0100 Subject: [PATCH 097/139] chore: loss unpacking --- mava/configs/arch/sebulba.yaml | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 0c1c8880d..eafeba202 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -9,7 +9,7 @@ evaluation_greedy: False # Evaluate the policy greedily. If True the policy will # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. num_eval_episodes: 200 # Number of episodes to evaluate per evaluation. -num_evaluation: 10 # Number of evenly spaced evaluations to perform during training. +num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. num_absolute_metric_eval_episodes: 32 # Number of episodes to evaluate the absolute metric (the final evaluation). absolute_metric: True # Whether the absolute metric should be computed. For more details # on the absolute metric please see: https://arxiv.org/abs/2209.10485 diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index f05d3cbdc..06aa268a8 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -324,10 +324,9 @@ def _critic_loss_fn( new_params = Params(actor_new_params, critic_new_params) new_opt_state = OptStates(actor_new_opt_state, critic_new_opt_state) # Pack loss info - total_loss = actor_loss_info[0] + critic_loss_info[0] - value_loss = critic_loss_info[1] - actor_loss = actor_loss_info[1][0] - entropy = actor_loss_info[1][1] + actor_total_loss, (actor_loss, entropy) = actor_loss_info + critic_total_loss, (value_loss) = critic_loss_info + total_loss = critic_total_loss + actor_total_loss loss_info = { "total_loss": total_loss, "value_loss": value_loss, From b24ac34e3ae3e38094522c915f5bd659773fc066 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Thu, 10 Oct 2024 17:13:21 +0100 Subject: [PATCH 098/139] fix: updated to work with the latest gymnasium --- mava/systems/ppo/sebulba/ff_ippo.py | 4 ++-- mava/wrappers/gym.py | 26 ++++++++++++++++++++++---- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 06aa268a8..ed85de3bf 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -406,7 +406,7 @@ def learner_setup( # create temporory envoirnments. env = environments.make_gym_env(config, config.arch.num_envs) # Get number of agents and actions. - action_space = env.unwrapped.single_action_space + action_space = env.single_action_space config.system.num_agents = len(action_space) config.system.num_actions = int(action_space[0].n) @@ -436,7 +436,7 @@ def learner_setup( ) # Initialise observation: Select only obs for a single agent. 
- init_obs = jnp.array([env.unwrapped.single_observation_space.sample()]) + init_obs = jnp.array([env.single_observation_space.sample()]) init_action_mask = jnp.ones((config.system.num_agents, config.system.num_actions)) init_x = Observation(init_obs, init_action_mask) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index ee4339afd..2756b3511 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -20,9 +20,10 @@ from typing import Any, Callable, Dict, Optional, Tuple, Union import gymnasium -import jax +import gymnasium.vector.async_vector_env import numpy as np from gymnasium import spaces +from gymnasium.spaces.utils import is_space_dtype_shape_equiv from gymnasium.vector.utils import write_to_shared_memory from jumanji.types import StepType, TimeStep from numpy.typing import NDArray @@ -195,9 +196,14 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: raise ValueError(f"Space {type(space)} is not currently supported.") -class GymToJumanji(gymnasium.Wrapper): +class GymToJumanji: """Converts from the Gym API to the dm_env API, using Jumanji's Timestep type.""" + def __init__(self, env: gymnasium.vector.async_vector_env): + self.env = env + self.single_action_space = env.unwrapped.single_action_space + self.single_observation_space = env.unwrapped.single_observation_space + def reset( self, seed: Optional[list[int]] = None, options: Optional[list[dict]] = None ) -> TimeStep: @@ -244,7 +250,8 @@ def _create_timestep( self, obs: NDArray, ep_done: NDArray, terminated: NDArray, rewards: NDArray, info: Dict ) -> TimeStep: obs = self._format_observation(obs, info) - extras = jax.tree.map(lambda *x: np.stack(x), *info["metrics"]) + # Filter out the masks and auxiliary data + extras = {key: value for key, value in info["metrics"].items() if key[0] != "_"} step_type = np.where(ep_done, StepType.LAST, StepType.MID) terminated = np.all(terminated, axis=1) @@ -256,6 +263,9 @@ def _create_timestep( extras=extras, ) + def close(self) -> None: + self.env.close() + # Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents @@ -321,9 +331,17 @@ def async_multiagent_worker( # CCR001 env.set_wrapper_attr(name, value) pipe.send((None, True)) elif command == "_check_spaces": + obs_mode, single_obs_space, single_action_space = data pipe.send( ( - (data[0] == observation_space, data[1] == action_space), + ( + ( + single_obs_space == observation_space + if obs_mode == "same" + else is_space_dtype_shape_equiv(single_obs_space, observation_space) + ), + single_action_space == action_space, + ), True, ) ) From 1dfb24105d0c3593e4c139e68bf7d79d91a1df2f Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Thu, 10 Oct 2024 18:32:55 +0100 Subject: [PATCH 099/139] fix: jumanji --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 0c68a3ca5..98a9f9912 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -9,7 +9,7 @@ id-marl-eval @ git+https://github.com/instadeepai/marl-eval jax jaxlib jaxmarl -jumanji @ git+https://github.com/sash-a/jumanji +jumanji @ git+https://github.com/sash-a/jumanji@old_jumanji lbforaging @ git+https://github.com/LukasSchaefer/lb-foraging.git@gymnasium_integration # fixes: https://github.com/semitable/lb-foraging/issues/20 matrax @ git+https://github.com/instadeepai/matrax mujoco==3.1.3 From fd8aece0d3590695e895f8047d916ff304c6d547 Mon Sep 17 00:00:00 2001 From: Louay Ben 
Nessir Date: Thu, 10 Oct 2024 18:43:56 +0100 Subject: [PATCH 100/139] fix: removed depricated gymnasium import --- mava/utils/make_env.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 405cb73b8..a5010307a 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -17,7 +17,6 @@ import gymnasium import gymnasium.vector import gymnasium.wrappers -import gymnasium.wrappers.compatibility import jaxmarl import jumanji import matrax From ae5341548738963673f9b4afc97b846bf24ec72b Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Thu, 10 Oct 2024 14:21:06 +0200 Subject: [PATCH 101/139] feat: minor refactor to sebulba utils --- mava/systems/ppo/anakin/ff_ippo.py | 2 +- mava/systems/ppo/anakin/ff_mappo.py | 2 +- mava/systems/ppo/anakin/rec_ippo.py | 2 +- mava/systems/ppo/anakin/rec_mappo.py | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 26 ++----- mava/systems/ppo/types.py | 4 +- mava/systems/q_learning/anakin/rec_iql.py | 2 +- mava/systems/sac/anakin/ff_isac.py | 2 +- mava/systems/sac/anakin/ff_masac.py | 2 +- .../{total_timestep_checker.py => config.py} | 22 ++++++ mava/utils/{sebulba_utils.py => sebulba.py} | 75 +++++++++++-------- 11 files changed, 84 insertions(+), 57 deletions(-) rename mava/utils/{total_timestep_checker.py => config.py} (67%) rename mava/utils/{sebulba_utils.py => sebulba.py} (70%) diff --git a/mava/systems/ppo/anakin/ff_ippo.py b/mava/systems/ppo/anakin/ff_ippo.py index 49c969cdb..6fabdd715 100644 --- a/mava/systems/ppo/anakin/ff_ippo.py +++ b/mava/systems/ppo/anakin/ff_ippo.py @@ -35,13 +35,13 @@ from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, MarlEnv from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import ( merge_leading_dims, unreplicate_batch_dim, unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics diff --git a/mava/systems/ppo/anakin/ff_mappo.py b/mava/systems/ppo/anakin/ff_mappo.py index cafa42888..ad14a2968 100644 --- a/mava/systems/ppo/anakin/ff_mappo.py +++ b/mava/systems/ppo/anakin/ff_mappo.py @@ -34,9 +34,9 @@ from mava.types import ActorApply, CriticApply, ExperimentOutput, LearnerFn, MarlEnv from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import merge_leading_dims, unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics diff --git a/mava/systems/ppo/anakin/rec_ippo.py b/mava/systems/ppo/anakin/rec_ippo.py index 230756295..0c1a161fc 100644 --- a/mava/systems/ppo/anakin/rec_ippo.py +++ b/mava/systems/ppo/anakin/rec_ippo.py @@ -48,9 +48,9 @@ ) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from 
mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics diff --git a/mava/systems/ppo/anakin/rec_mappo.py b/mava/systems/ppo/anakin/rec_mappo.py index 53ae7c65d..a83897a07 100644 --- a/mava/systems/ppo/anakin/rec_mappo.py +++ b/mava/systems/ppo/anakin/rec_mappo.py @@ -48,9 +48,9 @@ ) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index ed85de3bf..fd13bbb19 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -39,25 +39,13 @@ from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition -from mava.types import ( - ActorApply, - CriticApply, - ExperimentOutput, - Observation, - SebulbaLearnerFn, -) +from mava.types import ActorApply, CriticApply, ExperimentOutput, Observation, SebulbaLearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_sebulba_config, check_total_timesteps from mava.utils.jax_utils import merge_leading_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.sebulba_utils import ( - ParamsSource, - Pipeline, - RecordTimeTo, - ThreadLifetime, - check_config, -) -from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.sebulba import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -95,7 +83,7 @@ def rollout( # Define the util functions: select action function and prepare data to share it with learner. @jax.jit def get_action_and_value( - params: FrozenDict, + params: Params, observation: Observation, key: chex.PRNGKey, ) -> Tuple: @@ -147,7 +135,8 @@ def get_action_and_value( # Append data to storage reward = timestep.reward - info = timestep.extras + info = timestep.extras # todo: [metrics]? + # todo: when logging make sure timing dict has parent timing/... traj.append( PPOTransition( cached_next_dones, action, value, reward, log_prob, cached_next_obs, info @@ -547,7 +536,7 @@ def run_experiment(_config: DictConfig) -> float: # Calculate total timesteps. config = check_total_timesteps(config) - check_config(config) + check_sebulba_config(config) steps_per_rollout = ( config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval @@ -674,6 +663,7 @@ def run_experiment(_config: DictConfig) -> float: t = int(steps_per_rollout * (eval_step + 1)) logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) abs_metric_evaluator_envs.close() + # Stop the logger. 
logger.stop() diff --git a/mava/systems/ppo/types.py b/mava/systems/ppo/types.py index f129b89d3..c8145b1a7 100644 --- a/mava/systems/ppo/types.py +++ b/mava/systems/ppo/types.py @@ -20,7 +20,7 @@ from optax._src.base import OptState from typing_extensions import NamedTuple -from mava.types import Action, Done, HiddenState, State, Value +from mava.types import Action, Done, HiddenState, Observation, State, Value class Params(NamedTuple): @@ -74,7 +74,7 @@ class PPOTransition(NamedTuple): value: Value reward: chex.Array log_prob: chex.Array - obs: chex.Array + obs: Observation info: Dict diff --git a/mava/systems/q_learning/anakin/rec_iql.py b/mava/systems/q_learning/anakin/rec_iql.py index 05b860d85..f37a20c5f 100644 --- a/mava/systems/q_learning/anakin/rec_iql.py +++ b/mava/systems/q_learning/anakin/rec_iql.py @@ -48,13 +48,13 @@ from mava.types import Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import ( switch_leading_axes, unreplicate_batch_dim, unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics diff --git a/mava/systems/sac/anakin/ff_isac.py b/mava/systems/sac/anakin/ff_isac.py index 955725e00..b767c98e3 100644 --- a/mava/systems/sac/anakin/ff_isac.py +++ b/mava/systems/sac/anakin/ff_isac.py @@ -49,9 +49,9 @@ from mava.types import MarlEnv, Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics diff --git a/mava/systems/sac/anakin/ff_masac.py b/mava/systems/sac/anakin/ff_masac.py index 2df296be4..296822b3a 100644 --- a/mava/systems/sac/anakin/ff_masac.py +++ b/mava/systems/sac/anakin/ff_masac.py @@ -50,9 +50,9 @@ from mava.utils import make_env as environments from mava.utils.centralised_training import get_joint_action, get_updated_joint_actions from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics diff --git a/mava/utils/total_timestep_checker.py b/mava/utils/config.py similarity index 67% rename from mava/utils/total_timestep_checker.py rename to mava/utils/config.py index e48e40923..23484311b 100644 --- a/mava/utils/total_timestep_checker.py +++ b/mava/utils/config.py @@ -18,6 +18,28 @@ from omegaconf import DictConfig +def check_sebulba_config(config: DictConfig) -> None: + """Checks that the given config does not have conflicting values.""" + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." + config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + + assert config.arch.num_envs % len(config.arch.learner_device_ids) == 0, ( + "Number of environments must be divisible by the number of learner." 
+ + "The output of each actor is equally split across the learners." + ) + + num_eval_samples = ( + int(config.arch.num_envs / len(config.arch.learner_device_ids)) + * config.system.rollout_length + ) + assert num_eval_samples % config.system.num_minibatches == 0, ( + f"Number of training samples per evaluator ({num_eval_samples})" + + f"must be divisible by num_minibatches ({config.system.num_minibatches})." + ) + + def check_total_timesteps(config: DictConfig) -> DictConfig: """Check if total_timesteps is set, if not, set it based on the other parameters""" diff --git a/mava/utils/sebulba_utils.py b/mava/utils/sebulba.py similarity index 70% rename from mava/utils/sebulba_utils.py rename to mava/utils/sebulba.py index 041843d95..eee211828 100644 --- a/mava/utils/sebulba_utils.py +++ b/mava/utils/sebulba.py @@ -20,13 +20,16 @@ import jax import jax.numpy as jnp +from colorama import Fore, Style +from jax import tree from jumanji.types import TimeStep -from omegaconf import DictConfig -from mava.systems.ppo.types import Params, PPOTransition # todo: remove the ppo dependencies +# todo: remove the ppo dependencies +from mava.systems.ppo.types import Params, PPOTransition + +QUEUE_PUT_TIMEOUT = 180 -# Copied from https://github.com/instadeepai/sebulba/blob/main/sebulba/core.py class ThreadLifetime: """Simple class for a mutable boolean that can be used to signal a thread to stop.""" @@ -40,6 +43,14 @@ def stop(self) -> None: self._stop = True +@jax.jit +def _stack_trajectory(trajectory: List[PPOTransition]) -> PPOTransition: + """Stack a list of parallel_env transitions into a single + transition of shape [rollout_len, num_envs, ...].""" + return tree.map(lambda *x: jnp.stack(x, axis=0), *trajectory) # type: ignore + + +# Modified from https://github.com/instadeepai/sebulba/blob/main/sebulba/core.py class Pipeline(threading.Thread): """ The `Pipeline` shards trajectories into `learner_devices`, @@ -54,6 +65,7 @@ def __init__(self, max_size: int, learner_devices: List[jax.Device], lifetime: T Args: max_size: The maximum number of trajectories to keep in the pipeline. learner_devices: The devices to shard trajectories across. + lifetime: A `ThreadLifetime` which is used to stop this thread. """ super().__init__(name="Pipeline") self.learner_devices = learner_devices @@ -83,19 +95,39 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict self.tickets_queue.put((start_condition, end_condition)) start_condition.wait() # wait to be allowed to start - # [PPOTransition()] * rollout_len --> PPOTransition[done=(rollout_len, num_envs, ...)] - sharded_traj = jax.tree.map(lambda *x: self.shard_split_playload(jnp.stack(x), 1), *traj) + # [Transition(num_envs)] * rollout_len --> Transition[done=(rollout_len, num_envs, ...)] + traj = _stack_trajectory(traj) + # Split trajectory on the num envs axis so each learner device gets a valid full rollout + sharded_traj = jax.tree.map(lambda x: self.shard_split_playload(x, axis=1), traj) # Timestep[(num_envs, num_agents, ...), ...] --> # [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices sharded_timestep = jax.tree.map(self.shard_split_playload, timestep) - # The lock has to be released even if an exception is raised. + # We block on the put to ensure that actors wait for the learners to catch up. This does two + # things: + # 1. It ensures that the actors don't get too far ahead of the learners, which could lead to + # off-policy data. + # 2. 
It ensures that the actors don't in a sense "waste" samples and their time by + # generating samples that the learners can't consume. + # However, we put a timeout of 180 seconds to avoid deadlocks in case the learner + # is not consuming the data. This is a safety measure and should not be hit in normal + # operation. We use a try-finally since the lock has to be released even if an exception + # is raised. try: - self._queue.put((sharded_traj, sharded_timestep, time_dict), timeout=90) + self._queue.put( + (sharded_traj, sharded_timestep, time_dict), + block=True, + timeout=QUEUE_PUT_TIMEOUT, + ) + except queue.Full: # todo: check if this is needed because we catch this exception outside + print( + f"{Fore.RED}{Style.BRIGHT}Pipeline is full and actor has timed out, " + f"this should not happen. A deadlock might be occurring{Style.RESET_ALL}" + ) finally: with end_condition: - end_condition.notify() # tell we have finish + end_condition.notify() # notify that we have finished def qsize(self) -> int: """Returns the number of trajectories in the pipeline.""" @@ -107,6 +139,11 @@ def get( """Get a trajectory from the pipeline.""" return self._queue.get(block, timeout) # type: ignore + def clear(self) -> None: + """Clear the pipeline.""" + while not self._queue.empty(): + self._queue.get() + def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: split_payload = jnp.split(payload, len(self.learner_devices), axis=axis) return jax.device_put_sharded(split_payload, devices=self.learner_devices) @@ -158,25 +195,3 @@ def __enter__(self) -> None: def __exit__(self, *args: Any) -> None: end = time.monotonic() self.to.append(end - self.start) - - -def check_config(config: DictConfig) -> None: - """Checks that the given config does not have conflicting values.""" - assert ( - config.system.num_updates > config.arch.num_evaluation - ), "Number of updates per evaluation must be less than total number of updates." - config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation - - assert config.arch.num_envs % len(config.arch.learner_device_ids) == 0, ( - "Number of environments must be divisible by the number of learner." - + "The output of each actor is equally split across the learners." - ) - - num_eval_samples = ( - int(config.arch.num_envs / len(config.arch.learner_device_ids)) - * config.system.rollout_length - ) - assert num_eval_samples % config.system.num_minibatches == 0, ( - f"Number of training samples per evaluator ({num_eval_samples})" - + f"must be divisible by num_minibatches ({config.system.num_minibatches})." - ) From 724d2dc335a81aa44cd0b845a0c83eff1ccd9d17 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Thu, 10 Oct 2024 20:25:33 +0200 Subject: [PATCH 102/139] chore: a few minor changes to code style --- mava/configs/arch/sebulba.yaml | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 287 ++++++++++++++++------------ 2 files changed, 161 insertions(+), 128 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index eafeba202..d8f44fd3c 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -16,7 +16,7 @@ absolute_metric: True # Whether the absolute metric should be computed. 
For more # --- Sebulba devices config --- n_threads_per_executor: 2 # num of different threads/env batches per actor -executor_device_ids: [0] # ids of actor devices +actor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices rollout_queue_size : 5 # The size of the pipeline queue determines the extent of off-policy training allowed. A larger value permits more off-policy training. diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index fd13bbb19..311bb263f 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -16,6 +16,7 @@ import queue import threading import warnings +from collections import defaultdict from queue import Queue from typing import Any, Dict, List, Sequence, Tuple @@ -43,7 +44,7 @@ from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.config import check_sebulba_config, check_total_timesteps -from mava.utils.jax_utils import merge_leading_dims +from mava.utils.jax_utils import merge_leading_dims, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger from mava.utils.sebulba import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.training import make_learning_rate @@ -56,7 +57,7 @@ def rollout( rollout_queue: Pipeline, params_source: ParamsSource, apply_fns: Tuple[ActorApply, CriticApply], - actor_device_id: int, + actor_device: int, seeds: List[int], thread_lifetime: ThreadLifetime, ) -> None: @@ -69,7 +70,7 @@ def rollout( params_source (ParamsSource): Source for fetching the latest network parameters from the learner. apply_fns (Tuple): Functions for running the actor and critic networks. - actor_device_id (int): Device ID for this actor thread. + actor_device (Device): Actor device to use for rollout. seeds (List[int]): Seeds for initializing the environment. thread_lifetime (ThreadLifetime): Manages the thread's lifecycle. """ @@ -77,86 +78,85 @@ def rollout( env = environments.make_gym_env(config, config.arch.num_envs) actor_apply_fn, critic_apply_fn = apply_fns num_agents, num_envs = config.system.num_agents, config.arch.num_envs - current_actor_device = jax.devices()[actor_device_id] - move_to_device = lambda x: jax.device_put(x, device=current_actor_device) + move_to_device = lambda x: jax.device_put(x, device=actor_device) # Define the util functions: select action function and prepare data to share it with learner. @jax.jit - def get_action_and_value( + def act_fn( params: Params, observation: Observation, key: chex.PRNGKey, ) -> Tuple: """Get action and value.""" - key, subkey = jax.random.split(key) - actor_policy = actor_apply_fn(params.actor_params, observation) - action = actor_policy.sample(seed=subkey) + action = actor_policy.sample(seed=key) log_prob = actor_policy.log_prob(action) value = critic_apply_fn(params.critic_params, observation).squeeze() - return action, log_prob, value, key + return action, log_prob, value timestep = env.reset(seed=seeds) next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) - # Loop till the desired num_updates is reached. 
- while not thread_lifetime.should_stop(): - # Rollout - traj: List[PPOTransition] = [] - time_dict: Dict[str, List[float]] = { - "single_rollout_time": [], - "env_step_time": [], - "get_params_time": [], - "rollout_put_time": [], - } - - # Loop over the rollout length - with RecordTimeTo(time_dict["single_rollout_time"]): - for _ in range(config.system.rollout_length): - with RecordTimeTo(time_dict["get_params_time"]): - # Get the latest parameters from the learner - params = params_source.get() - - cached_next_obs = tree.map(move_to_device, timestep.observation) - cached_next_dones = move_to_device(next_dones) - - # Get action and value - action, log_prob, value, key = get_action_and_value(params, cached_next_obs, key) - - # Step the environment - cpu_action = jax.device_get(action) - - with RecordTimeTo(time_dict["env_step_time"]): - # (num_env, num_agents) --> (num_agents, num_env) - timestep = env.step(cpu_action.swapaxes(0, 1)) - - next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) - - # Append data to storage - reward = timestep.reward - info = timestep.extras # todo: [metrics]? - # todo: when logging make sure timing dict has parent timing/... - traj.append( - PPOTransition( - cached_next_dones, action, value, reward, log_prob, cached_next_obs, info + with jax.default_device(actor_device): + # Loop till the desired num_updates is reached. + while not thread_lifetime.should_stop(): + # Rollout + traj: List[PPOTransition] = [] + actor_timings: Dict[str, List[float]] = defaultdict(list) + # Loop over the rollout length + with RecordTimeTo(actor_timings["rollout_time"]): + for _ in range(config.system.rollout_length): + with RecordTimeTo(actor_timings["get_params_time"]): + # Get the latest parameters from the learner + params = params_source.get() + + cached_next_obs = tree.map(move_to_device, timestep.observation) + cached_next_dones = move_to_device(next_dones) + + # Get action and value + with RecordTimeTo(actor_timings["compute_action_time"]): + key, act_key = jax.random.split(key) + action, log_prob, value = act_fn(params, cached_next_obs, act_key) + cpu_action = jax.device_get(action) + + # Step environment + with RecordTimeTo(actor_timings["env_step_time"]): + # (num_env, num_agents) --> (num_agents, num_env) + timestep = env.step(cpu_action.swapaxes(0, 1)) + + next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) + + # Append data to storage + reward = timestep.reward + info = timestep.extras # todo: [metrics]? + # todo: when logging make sure timing dict has parent timing/... 
+ traj.append( + PPOTransition( + cached_next_dones, + action, + value, + reward, + log_prob, + cached_next_obs, + info, + ) ) - ) - # send trajectories to learner - with RecordTimeTo(time_dict["rollout_put_time"]): - try: - rollout_queue.put(traj, timestep, time_dict) - except queue.Full: - warnings.warn( - "Waited too long to add to the rollout queue, killing the actor thread", - stacklevel=2, - ) - break + # send trajectories to learner + with RecordTimeTo(actor_timings["rollout_put_time"]): + try: + rollout_queue.put(traj, timestep, actor_timings) + except queue.Full: + warnings.warn( + "Waited too long to add to the rollout queue, killing the actor thread", + stacklevel=2, + ) + break env.close() -def get_learner_fn( +def get_learner_step_fn( apply_fns: Tuple[ActorApply, CriticApply], update_fns: Tuple[optax.TransformUpdateFn, optax.TransformUpdateFn], config: DictConfig, @@ -385,6 +385,54 @@ def learner_fn( return learner_fn +def learner_thread( + learn_fn: SebulbaLearnerFn[LearnerState, PPOTransition], + learner_state: LearnerState, + config: DictConfig, + eval_queue: Queue, + pipeline: Pipeline, + params_sources: Sequence[ParamsSource], +) -> None: + for _ in range(config.arch.num_evaluation): + # Create the lists to store metrics and timings for this learning iteration. + metrics: List[Tuple[Dict, Dict]] = [] + rollout_times: List[Dict] = [] + learn_times: Dict[str, List[float]] = defaultdict(list) + + with RecordTimeTo(learn_times["learner_time_per_eval"]): + for _ in range(config.system.num_updates_per_eval): + # Get the trajectory batch from the pipeline + # This is blocking so it will wait until the pipeline has data. + with RecordTimeTo(learn_times["rollout_get_time"]): + traj_batch, timestep, rollout_time = pipeline.get(block=True) + + # Replace the timestep in the learner state with the latest timestep + # This means the learner has access to the entire trajectory as well as + # an additional timestep which it can use to bootstrap. + learner_state = learner_state._replace(timestep=timestep) + # Update the networks + with RecordTimeTo(learn_times["learning_time"]): + learner_state, episode_metrics, train_metrics = learn_fn( + learner_state, traj_batch + ) + + metrics.append((episode_metrics, train_metrics)) + rollout_times.append(rollout_time) + + # Update all the params sources so all actors can get the latest params + unreplicated_params = unreplicate(learner_state.params) + for source in params_sources: + source.update(unreplicated_params) + + # Pass all the metrics and params to the main thread (evaluator) for logging and evaluation + episode_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) + rollout_times = tree.map(lambda *x: np.mean(x), *rollout_times) + timing_dict = rollout_times | learn_times + timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) + + eval_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) + + def learner_setup( key: chex.PRNGKey, config: DictConfig, learner_devices: List ) -> Tuple[ @@ -444,7 +492,7 @@ def learner_setup( apply_fns = (actor_network.apply, critic_network.apply) update_fns = (actor_optim.update, critic_optim.update) - learn = get_learner_fn(apply_fns, update_fns, config) + learn = get_learner_step_fn(apply_fns, update_fns, config) learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) # Load model from checkpoint if specified. 
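A note on the data layout the pmapped learner step expects: the Pipeline splits every trajectory along the environment axis and places one chunk on each learner device (its shard_split_playload method), so the learner never has to reshuffle data across devices. A minimal illustrative sketch of that sharding, not part of the patch; the device count and array shapes are assumptions:

    import jax
    import jax.numpy as jnp

    learner_devices = jax.devices()[:2]                      # assumption: at least two local devices
    batch = jnp.zeros((128, 16, 4))                          # (rollout_len, num_envs, num_agents)
    splits = jnp.split(batch, len(learner_devices), axis=1)  # one (128, 8, 4) chunk per learner
    sharded = jax.device_put_sharded(splits, devices=learner_devices)
    # Each learner device now holds a full-length rollout for its share of the environments,
    # which is the shape the pmapped learn function consumes from pipeline.get().
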
@@ -474,51 +522,16 @@ def learner_setup( return learn, apply_fns, init_learner_state -def learner( - learn: SebulbaLearnerFn[LearnerState, PPOTransition], - learner_state: LearnerState, - config: DictConfig, - eval_queue: Queue, - pipeline: Pipeline, - params_sources: Sequence[ParamsSource], -) -> None: - for _eval_step in range(config.arch.num_evaluation): - metrics: List[Tuple[Dict, Dict]] = [] - rollout_times: List[Dict] = [] - learn_times: Dict[str, List[float]] = {"rollout_get_time": [], "learning_time": []} - - for _update in range(config.system.num_updates_per_eval): - with RecordTimeTo(learn_times["rollout_get_time"]): - traj_batch, timestep, rollout_time = pipeline.get(block=True) - - learner_state = learner_state._replace(timestep=timestep) - with RecordTimeTo(learn_times["learning_time"]): - learner_state, episode_metrics, train_metrics = learn(learner_state, traj_batch) - - metrics.append((episode_metrics, train_metrics)) - rollout_times.append(rollout_time) - - unreplicated_params = unreplicate(learner_state.params) - - for source in params_sources: - source.update(unreplicated_params) - - # Pass to the evaluator - episode_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) - - rollout_times = tree.map(lambda *x: np.mean(x), *rollout_times) - timing_dict = rollout_times | learn_times - timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) - - eval_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) - - def run_experiment(_config: DictConfig) -> float: """Runs experiment.""" config = copy.deepcopy(_config) + local_devices = jax.local_devices() devices = jax.devices() + err = "Local and global devices must be the same, we dont support multihost yet" + assert len(local_devices) == len(devices), err learner_devices = [devices[d_id] for d_id in config.arch.learner_device_ids] + actor_devices = [local_devices[device_id] for device_id in config.arch.actor_device_ids] # JAX and numpy RNGs key = jax.random.PRNGKey(config.system.seed) @@ -565,36 +578,45 @@ def run_experiment(_config: DictConfig) -> float: pipe = Pipeline(config.arch.rollout_queue_size, learner_devices, pipe_lifetime) pipe.start() - param_sources: List[ParamsSource] = [] + params_sources: List[ParamsSource] = [] actor_threads: List[threading.Thread] = [] - actors_lifetime = ThreadLifetime() + actor_lifetime = ThreadLifetime() params_sources_lifetime = ThreadLifetime() # Create the actor threads - for d_idx, d_id in enumerate(config.arch.executor_device_ids): - # Loop through each executor thread + for actor_device in actor_devices: + # Create 1 params source per device + params_source = ParamsSource(inital_params, actor_device, params_sources_lifetime) + params_source.start() + params_sources.append(params_source) + # Create multiple rollout threads per actor device for thread_id in range(config.arch.n_threads_per_executor): - seeds = np_rng.integers(np.iinfo(np.int32).max, size=config.arch.num_envs).tolist() key, act_key = jax.random.split(key) - act_key = jax.device_put(key, devices[d_id]) - - param_source = ParamsSource(inital_params, devices[d_id], params_sources_lifetime) - param_source.start() - param_sources.append(param_source) + seeds = np_rng.integers(np.iinfo(np.int32).max, size=config.arch.num_envs).tolist() + act_key = jax.device_put(key, actor_device) actor = threading.Thread( target=rollout, - args=(act_key, config, pipe, param_source, apply_fns, d_id, seeds, actors_lifetime), - name=f"Actor-{thread_id + d_idx * 
config.arch.n_threads_per_executor}", + args=( + act_key, + config, + pipe, + params_source, + apply_fns, + actor_device, + seeds, + actor_lifetime, + ), + name=f"Actor-{actor_device}-{thread_id}", ) actor.start() actor_threads.append(actor) eval_queue: Queue = Queue() threading.Thread( - target=learner, + target=learner_thread, name="Learner", - args=(learn, learner_state, config, eval_queue, pipe, param_sources), + args=(learn, learner_state, config, eval_queue, pipe, params_sources), ).start() max_episode_return = -jnp.inf @@ -605,17 +627,21 @@ def run_experiment(_config: DictConfig) -> float: # This loop waits for the learner to finish an update before evaluation and logging. for eval_step in range(config.arch.num_evaluation): # Get the next set of params and metrics from the learner - episode_metrics, train_metrics, learner_state, times_dict = eval_queue.get() + episode_metrics, train_metrics, learner_state, time_metrics = eval_queue.get() t = int(steps_per_rollout * (eval_step + 1)) - times_dict["timestep"] = t - logger.log(times_dict, t, eval_step, LogEvent.MISC) + time_metrics["timestep"] = t + logger.log(time_metrics, t, eval_step, LogEvent.MISC) episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) - episode_metrics["steps_per_second"] = steps_per_rollout / times_dict["single_rollout_time"] + episode_metrics["steps_per_second"] = steps_per_rollout / time_metrics["rollout_time"] if ep_completed: logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + train_metrics["learner_step"] = (eval_step + 1) * config.system.num_updates_per_eval + train_metrics["learner_steps_per_second"] = ( + config.system.num_updates_per_eval + ) / time_metrics["learner_time_per_eval"] logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) unreplicated_actor_params = unreplicate(learner_state.params.actor_params) @@ -625,11 +651,10 @@ def run_experiment(_config: DictConfig) -> float: episode_return = jnp.mean(eval_metrics["episode_return"]) - if save_checkpoint: - # Save checkpoint of learner state + if save_checkpoint: # Save a checkpoint of the learner state checkpointer.save( timestep=steps_per_rollout * (eval_step + 1), - unreplicated_learner_state=learner_state, + unreplicated_learner_state=unreplicate_n_dims(learner_state), episode_return=episode_return, ) @@ -640,20 +665,28 @@ def run_experiment(_config: DictConfig) -> float: evaluator_envs.close() eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) - # Make sure all of the Threads are closed. - actors_lifetime.stop() + print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping actor threads...{Style.RESET_ALL}") + # Make sure all of the Threads are stopped. + actor_lifetime.stop() for actor in actor_threads: + # We clear the pipeline before stopping each actor thread to avoid deadlock + pipe.clear() actor.join() + print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping pipeline...{Style.RESET_ALL}") pipe_lifetime.stop() pipe.join() + print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping params sources...{Style.RESET_ALL}") params_sources_lifetime.stop() - for param_source in param_sources: - param_source.join() + for params_source in params_sources: + params_source.join() + + print(f"{Fore.MAGENTA}{Style.BRIGHT}All threads stopped...{Style.RESET_ALL}") # Measure absolute metric. 
if config.arch.absolute_metric: + print(f"{Fore.BLUE}{Style.BRIGHT}Measuring absolute metric...{Style.RESET_ALL}") abs_metric_evaluator, abs_metric_evaluator_envs = get_eval_fn( environments.make_gym_env, eval_act_fn, config, np_rng, absolute_metric=True ) From 47b8e036f57722d7a2b98d4d0801bdd186a77c1f Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Fri, 11 Oct 2024 09:51:59 +0200 Subject: [PATCH 103/139] fix: update configs to match latest mava --- mava/configs/default/ff_ippo_sebulba.yaml | 11 +++++++++++ mava/configs/default_ff_ippo_sebulba.yaml | 7 ------- mava/configs/env/lbf_gym.yaml | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 4 +++- 4 files changed, 15 insertions(+), 9 deletions(-) create mode 100644 mava/configs/default/ff_ippo_sebulba.yaml delete mode 100644 mava/configs/default_ff_ippo_sebulba.yaml diff --git a/mava/configs/default/ff_ippo_sebulba.yaml b/mava/configs/default/ff_ippo_sebulba.yaml new file mode 100644 index 000000000..babd113ee --- /dev/null +++ b/mava/configs/default/ff_ippo_sebulba.yaml @@ -0,0 +1,11 @@ +defaults: + - logger: logger + - arch: sebulba + - system: ppo/ff_ippo + - network: mlp # [mlp, continuous_mlp, cnn] + - env: lbf_gym # [rware_gym, lbf_gym] + - _self_ + +hydra: + searchpath: + - file://mava/configs diff --git a/mava/configs/default_ff_ippo_sebulba.yaml b/mava/configs/default_ff_ippo_sebulba.yaml deleted file mode 100644 index 3a7386969..000000000 --- a/mava/configs/default_ff_ippo_sebulba.yaml +++ /dev/null @@ -1,7 +0,0 @@ -defaults: - - logger: ff_ippo - - arch: sebulba - - system: ppo/ff_ippo - - network: mlp - - env: lbf_gym - - _self_ diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index b0d783a7e..b6c380c9e 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -1,7 +1,7 @@ # ---Environment Configs--- defaults: - _self_ - - scenario: gym-lbf-2s-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] + - scenario: gym-lbf-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] env_name: LevelBasedForaging # Used for logging purposes. 
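With the defaults file now living at mava/configs/default/ff_ippo_sebulba.yaml, the Sebulba system is launched with Hydra overrides on the command line. An illustrative invocation only: it assumes the module's usual __main__ entry point, and the override values are arbitrary examples rather than part of the patch:

    python mava/systems/ppo/sebulba/ff_ippo.py env=lbf_gym arch.num_envs=32 \
        'arch.actor_device_ids=[0]' 'arch.learner_device_ids=[0]' arch.n_threads_per_executor=2
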
diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 311bb263f..1ce40ac8c 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -704,7 +704,9 @@ def run_experiment(_config: DictConfig) -> float: @hydra.main( - config_path="../../../configs", config_name="default_ff_ippo_sebulba.yaml", version_base="1.2" + config_path="../../../configs/default/", + config_name="ff_ippo_sebulba.yaml", + version_base="1.2", ) def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" From 8be803782724c33b466012072397762b24d0a6ac Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Fri, 11 Oct 2024 13:04:59 +0000 Subject: [PATCH 104/139] fix: reshape with multiple learners and system name --- mava/systems/ppo/sebulba/ff_ippo.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 1ce40ac8c..8db82fdea 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -164,6 +164,8 @@ def get_learner_step_fn( """Get the learner function.""" num_agents, num_envs = config.system.num_agents, config.arch.num_envs + num_learner_envs = int(num_envs // len(config.arch.learner_device_ids)) + # Get apply and update functions for actor and critic networks. actor_apply_fn, critic_apply_fn = apply_fns @@ -206,7 +208,7 @@ def _get_advantages( return advantages, advantages + traj_batch.value # Calculate advantage - last_dones = jnp.repeat(learner_state.timestep.last(), num_agents).reshape(num_envs, -1) + last_dones = jnp.repeat(learner_state.timestep.last(), num_agents).reshape(num_learner_envs, -1) params, opt_states, key, _, _ = learner_state last_val = critic_apply_fn(params.critic_params, learner_state.timestep.observation) advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) @@ -327,9 +329,7 @@ def _critic_loss_fn( params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) # Shuffle minibatches - batch_size = config.system.rollout_length * ( - config.arch.num_envs // len(config.arch.learner_device_ids) - ) + batch_size = config.system.rollout_length * num_learner_envs permutation = jax.random.permutation(shuffle_key, batch_size) batch = (traj_batch, advantages, targets) batch = tree.map(lambda x: merge_leading_dims(x, 2), batch) @@ -712,6 +712,7 @@ def hydra_entry_point(cfg: DictConfig) -> float: """Experiment entry point.""" # Allow dynamic attributes. OmegaConf.set_struct(cfg, False) + cfg.logger.system_name = "ff_ippo_sebulba" # Run experiment. 
eval_performance = run_experiment(cfg) From 47486364921372f3a29b8cc8dd71df5de8137246 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Fri, 11 Oct 2024 16:27:07 +0200 Subject: [PATCH 105/139] fix: safer pipeline.clear() --- mava/utils/sebulba.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index eee211828..b9d95c7f5 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -142,7 +142,10 @@ def get( def clear(self) -> None: """Clear the pipeline.""" while not self._queue.empty(): - self._queue.get() + try: + self._queue.get(block=False) + except queue.Empty: + break def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: split_payload = jnp.split(payload, len(self.learner_devices), axis=axis) From 5593bde87a3aafb2f3cc7344ef87aa446f9637f1 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Mon, 14 Oct 2024 13:59:28 +0000 Subject: [PATCH 106/139] feat: avoid unecessary host-device transfers --- mava/systems/ppo/sebulba/ff_ippo.py | 113 +++++++++++++--------------- mava/utils/sebulba.py | 12 ++- mava/wrappers/gym.py | 37 ++++++++- 3 files changed, 98 insertions(+), 64 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 8db82fdea..326c94f35 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -74,13 +74,11 @@ def rollout( seeds (List[int]): Seeds for initializing the environment. thread_lifetime (ThreadLifetime): Manages the thread's lifecycle. """ - # setup env = environments.make_gym_env(config, config.arch.num_envs) actor_apply_fn, critic_apply_fn = apply_fns num_agents, num_envs = config.system.num_agents, config.arch.num_envs move_to_device = lambda x: jax.device_put(x, device=actor_device) - # Define the util functions: select action function and prepare data to share it with learner. @jax.jit def act_fn( params: Params, @@ -96,62 +94,57 @@ def act_fn( return action, log_prob, value timestep = env.reset(seed=seeds) - next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) - - with jax.default_device(actor_device): - # Loop till the desired num_updates is reached. - while not thread_lifetime.should_stop(): - # Rollout - traj: List[PPOTransition] = [] - actor_timings: Dict[str, List[float]] = defaultdict(list) - # Loop over the rollout length - with RecordTimeTo(actor_timings["rollout_time"]): - for _ in range(config.system.rollout_length): - with RecordTimeTo(actor_timings["get_params_time"]): - # Get the latest parameters from the learner - params = params_source.get() - - cached_next_obs = tree.map(move_to_device, timestep.observation) - cached_next_dones = move_to_device(next_dones) - - # Get action and value - with RecordTimeTo(actor_timings["compute_action_time"]): - key, act_key = jax.random.split(key) - action, log_prob, value = act_fn(params, cached_next_obs, act_key) - cpu_action = jax.device_get(action) - - # Step environment - with RecordTimeTo(actor_timings["env_step_time"]): - # (num_env, num_agents) --> (num_agents, num_env) - timestep = env.step(cpu_action.swapaxes(0, 1)) - - next_dones = jnp.repeat(timestep.last(), num_agents).reshape(num_envs, -1) - - # Append data to storage - reward = timestep.reward - info = timestep.extras # todo: [metrics]? - # todo: when logging make sure timing dict has parent timing/... 
- traj.append( - PPOTransition( - cached_next_dones, - action, - value, - reward, - log_prob, - cached_next_obs, - info, - ) - ) - # send trajectories to learner - with RecordTimeTo(actor_timings["rollout_put_time"]): - try: - rollout_queue.put(traj, timestep, actor_timings) - except queue.Full: - warnings.warn( - "Waited too long to add to the rollout queue, killing the actor thread", - stacklevel=2, + next_dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) + + # with jax.default_device(actor_device): + # Loop till the desired num_updates is reached. + while not thread_lifetime.should_stop(): + # Rollout + traj: List[PPOTransition] = [] + actor_timings: Dict[str, List[float]] = defaultdict(list) + with RecordTimeTo(actor_timings["rollout_time"]): + for _ in range(config.system.rollout_length): + with RecordTimeTo(actor_timings["get_params_time"]): + # Get the latest parameters from the learner + params = params_source.get() + + cached_next_obs = tree.map(move_to_device, timestep.observation) + cached_next_dones = move_to_device(next_dones) + + # Get action and value + with RecordTimeTo(actor_timings["compute_action_time"]): + key, act_key = jax.random.split(key) + action, log_prob, value = act_fn(params, cached_next_obs, act_key) + cpu_action = jax.device_get(action) + + # Step environment + with RecordTimeTo(actor_timings["env_step_time"]): + timestep = env.step(cpu_action.swapaxes(0, 1)) + + # todo: just for fixing transfer guard, real issue is the TimeStep.last() - need to make sebulba timestep type + next_dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) + + # Append data to storage + # todo: when logging make sure timing dict has parent timing/... + traj.append( + PPOTransition( + cached_next_dones, + action, + value, + timestep.reward, + log_prob, + cached_next_obs, + timestep.extras, ) - break + ) + # send trajectories to learner + with RecordTimeTo(actor_timings["rollout_put_time"]): + try: + rollout_queue.put(traj, timestep, actor_timings) + except queue.Full: + err = "Waited too long to add to the rollout queue, killing the actor thread" + warnings.warn(err, stacklevel=2) + break env.close() @@ -619,7 +612,7 @@ def run_experiment(_config: DictConfig) -> float: args=(learn, learner_state, config, eval_queue, pipe, params_sources), ).start() - max_episode_return = -jnp.inf + max_episode_return = -np.inf best_params = inital_params.actor_params # This is the main loop, all it does is evaluation and logging. @@ -649,7 +642,7 @@ def run_experiment(_config: DictConfig) -> float: eval_metrics = evaluator(unreplicated_actor_params, eval_key, {}) logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) - episode_return = jnp.mean(eval_metrics["episode_return"]) + episode_return = np.mean(eval_metrics["episode_return"]) if save_checkpoint: # Save a checkpoint of the learner state checkpointer.save( @@ -663,7 +656,7 @@ def run_experiment(_config: DictConfig) -> float: max_episode_return = episode_return evaluator_envs.close() - eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) + eval_performance = float(np.mean(eval_metrics[config.env.eval_metric])) print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping actor threads...{Style.RESET_ALL}") # Make sure all of the Threads are stopped. diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index b9d95c7f5..22753de0c 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -13,6 +13,7 @@ # limitations under the License. 
+from functools import partial import queue import threading import time @@ -20,6 +21,7 @@ import jax import jax.numpy as jnp +import numpy as np from colorama import Fore, Style from jax import tree from jumanji.types import TimeStep @@ -68,6 +70,7 @@ def __init__(self, max_size: int, learner_devices: List[jax.Device], lifetime: T lifetime: A `ThreadLifetime` which is used to stop this thread. """ super().__init__(name="Pipeline") + self.learner_devices = learner_devices self.tickets_queue: queue.Queue = queue.Queue() self._queue: queue.Queue = queue.Queue(maxsize=max_size) @@ -148,9 +151,14 @@ def clear(self) -> None: break def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: - split_payload = jnp.split(payload, len(self.learner_devices), axis=axis) - return jax.device_put_sharded(split_payload, devices=self.learner_devices) + return self.shard_payload(self.split_payload(payload, axis)) + + @partial(jax.jit, static_argnums=(0, 2)) + def split_payload(self, payload: Any, axis: int = 0): + return jnp.split(payload, len(self.learner_devices), axis=axis) + def shard_payload(self, payload: Any): + return jax.device_put_sharded(payload, devices=self.learner_devices) class ParamsSource(threading.Thread): """A `ParamSource` is a component that allows networks params to be passed from a diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 2756b3511..0b2dff78d 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -17,23 +17,56 @@ import warnings from multiprocessing import Queue from multiprocessing.connection import Connection -from typing import Any, Callable, Dict, Optional, Tuple, Union +from typing import Any, Callable, Dict, Optional, Tuple, Union, NamedTuple, TYPE_CHECKING +from dataclasses import field import gymnasium import gymnasium.vector.async_vector_env import numpy as np from gymnasium import spaces from gymnasium.spaces.utils import is_space_dtype_shape_equiv from gymnasium.vector.utils import write_to_shared_memory -from jumanji.types import StepType, TimeStep from numpy.typing import NDArray from mava.types import Observation, ObservationGlobalState +if TYPE_CHECKING: # https://github.com/python/mypy/issues/6239 + from dataclasses import dataclass +else: + from chex import dataclass + # Filter out the warnings warnings.filterwarnings("ignore", module="gymnasium.utils.passive_env_checker") +# needed to avoid host -> device transfers when calling TimeStep.last() +class StepType: + """Coppy of Jumanji's step type but with numpy arrays""" + + FIRST = 0 + MID = 1 + LAST = 2 + + +@dataclass +class TimeStep: + step_type: StepType + reward: NDArray + discount: NDArray + observation: Observation + extras: Dict = field(default_factory=dict) + + + def first(self) -> bool: + return self.step_type == StepType.FIRST + + def mid(self) -> bool: + return self.step_type == StepType.MID + + def last(self) -> bool: + return self.step_type == StepType.LAST + + class GymWrapper(gymnasium.Wrapper): """Base wrapper for multi-agent gym environments. This wrapper works out of the box for RobotWarehouse. 
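The numpy-backed StepType and TimeStep above exist so that timestep.last() is evaluated on the host; the actor thread can then build its per-agent done mask without triggering a host-to-device transfer. A minimal sketch of that host-side path, not part of the patch, with illustrative shapes (4 envs, 2 agents) as assumptions:

    import numpy as np

    step_type = np.array([2, 0, 1, 2])         # StepType.LAST == 2, so envs 0 and 3 just ended
    last = step_type == 2                      # what the numpy TimeStep.last() evaluates to
    dones = np.repeat(last, 2).reshape(4, -1)  # (num_envs, num_agents) mask stored in PPOTransition
    # Plain numpy throughout: no jax.Array is created, so no device transfer happens here.
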
From 133ea1ad1cf00a4c1f58809835111d95a3f4ee02 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Mon, 14 Oct 2024 16:02:52 +0000 Subject: [PATCH 107/139] chore: remove some more device transfers --- mava/systems/ppo/sebulba/ff_ippo.py | 4 +--- mava/wrappers/episode_metrics.py | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 326c94f35..cca138205 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -121,11 +121,9 @@ def act_fn( with RecordTimeTo(actor_timings["env_step_time"]): timestep = env.step(cpu_action.swapaxes(0, 1)) - # todo: just for fixing transfer guard, real issue is the TimeStep.last() - need to make sebulba timestep type next_dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) # Append data to storage - # todo: when logging make sure timing dict has parent timing/... traj.append( PPOTransition( cached_next_dones, @@ -623,7 +621,7 @@ def run_experiment(_config: DictConfig) -> float: episode_metrics, train_metrics, learner_state, time_metrics = eval_queue.get() t = int(steps_per_rollout * (eval_step + 1)) - time_metrics["timestep"] = t + time_metrics |= {"timestep": t, "pipline_size": pipe.qsize()} logger.log(time_metrics, t, eval_step, LogEvent.MISC) episode_metrics, ep_completed = get_final_step_metrics(episode_metrics) diff --git a/mava/wrappers/episode_metrics.py b/mava/wrappers/episode_metrics.py index e9e130819..63d65e35e 100644 --- a/mava/wrappers/episode_metrics.py +++ b/mava/wrappers/episode_metrics.py @@ -20,6 +20,7 @@ from jax import tree from jumanji.types import TimeStep from jumanji.wrappers import Wrapper +import numpy as np from mava.types import MarlEnv, State @@ -120,12 +121,12 @@ def get_final_step_metrics(metrics: Dict[str, chex.Array]) -> Tuple[Dict[str, ch expects arrays for computing summary statistics on the episode metrics. """ is_final_ep = metrics.pop("is_terminal_step") - has_final_ep_step = bool(jnp.any(is_final_ep)) + has_final_ep_step = bool(np.any(is_final_ep)) final_metrics: Dict[str, chex.Array] # If it didn't make it to the final step, return zeros. if not has_final_ep_step: - final_metrics = tree.map(jnp.zeros_like, metrics) + final_metrics = tree.map(np.zeros_like, metrics) else: final_metrics = tree.map(lambda x: x[is_final_ep], metrics) From 9260e9b52080d434da599a7d3536f2832ccb8a1c Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Mon, 14 Oct 2024 19:38:11 +0000 Subject: [PATCH 108/139] chore: better graceful exit --- mava/systems/ppo/sebulba/ff_ippo.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index cca138205..75409944c 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -659,10 +659,12 @@ def run_experiment(_config: DictConfig) -> float: print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping actor threads...{Style.RESET_ALL}") # Make sure all of the Threads are stopped. 
actor_lifetime.stop() + # We clear the pipeline before stopping the actor threads to avoid deadlock + pipe.clear() + print(f"{Fore.RED}{Style.BRIGHT}Pipe cleared: {pipe.qsize()}{Style.RESET_ALL}") for actor in actor_threads: - # We clear the pipeline before stopping each actor thread to avoid deadlock - pipe.clear() actor.join() + print(f"{Fore.RED}{Style.BRIGHT}{actor.name} stopped{Style.RESET_ALL}") print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping pipeline...{Style.RESET_ALL}") pipe_lifetime.stop() From d61dcfb4decc6790f2d8383cf80dea9601fef45c Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Tue, 15 Oct 2024 12:58:17 +0000 Subject: [PATCH 109/139] fix: create envs in main thread to avoid deadlocks --- mava/systems/ppo/sebulba/ff_ippo.py | 62 ++++++++++++++++++----------- mava/utils/logger.py | 1 + 2 files changed, 40 insertions(+), 23 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 75409944c..5208bc312 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -53,6 +53,7 @@ def rollout( key: chex.PRNGKey, + env, config: DictConfig, rollout_queue: Pipeline, params_source: ParamsSource, @@ -74,7 +75,8 @@ def rollout( seeds (List[int]): Seeds for initializing the environment. thread_lifetime (ThreadLifetime): Manages the thread's lifecycle. """ - env = environments.make_gym_env(config, config.arch.num_envs) + name = threading.current_thread().name + print(f"{Fore.BLUE}{Style.BRIGHT}Thread {name} started{Style.RESET_ALL}") actor_apply_fn, critic_apply_fn = apply_fns num_agents, num_envs = config.system.num_agents, config.arch.num_envs move_to_device = lambda x: jax.device_put(x, device=actor_device) @@ -96,7 +98,6 @@ def act_fn( timestep = env.reset(seed=seeds) next_dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) - # with jax.default_device(actor_device): # Loop till the desired num_updates is reached. 
while not thread_lifetime.should_stop(): # Rollout @@ -104,6 +105,10 @@ def act_fn( actor_timings: Dict[str, List[float]] = defaultdict(list) with RecordTimeTo(actor_timings["rollout_time"]): for _ in range(config.system.rollout_length): + # if thread_lifetime.should_stop(): + # env.close() + # return + with RecordTimeTo(actor_timings["get_params_time"]): # Get the latest parameters from the learner params = params_source.get() @@ -135,6 +140,7 @@ def act_fn( timestep.extras, ) ) + # send trajectories to learner with RecordTimeTo(actor_timings["rollout_put_time"]): try: @@ -574,8 +580,17 @@ def run_experiment(_config: DictConfig) -> float: actor_lifetime = ThreadLifetime() params_sources_lifetime = ThreadLifetime() + # Unfortunately we have to do this here, because creating envs inside the actor threads causes deadlocks + envs = [[] for i in range(len(actor_devices))] + print(f"{Fore.BLUE}{Style.BRIGHT}Starting up environments, this may take a while...{Style.RESET_ALL}") + for i in range(len(actor_devices)): + for _ in range(config.arch.n_threads_per_executor): + env = environments.make_gym_env(config, config.arch.num_envs) + envs[i].append(env) + print(f"{Fore.BLUE}{Style.BRIGHT}All environments created{Style.RESET_ALL}") + # Create the actor threads - for actor_device in actor_devices: + for dev_idx, actor_device in enumerate(actor_devices): # Create 1 params source per device params_source = ParamsSource(inital_params, actor_device, params_sources_lifetime) params_source.start() @@ -590,6 +605,7 @@ def run_experiment(_config: DictConfig) -> float: target=rollout, args=( act_key, + envs[dev_idx][thread_id], config, pipe, params_source, @@ -656,26 +672,6 @@ def run_experiment(_config: DictConfig) -> float: evaluator_envs.close() eval_performance = float(np.mean(eval_metrics[config.env.eval_metric])) - print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping actor threads...{Style.RESET_ALL}") - # Make sure all of the Threads are stopped. - actor_lifetime.stop() - # We clear the pipeline before stopping the actor threads to avoid deadlock - pipe.clear() - print(f"{Fore.RED}{Style.BRIGHT}Pipe cleared: {pipe.qsize()}{Style.RESET_ALL}") - for actor in actor_threads: - actor.join() - print(f"{Fore.RED}{Style.BRIGHT}{actor.name} stopped{Style.RESET_ALL}") - - print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping pipeline...{Style.RESET_ALL}") - pipe_lifetime.stop() - pipe.join() - - print(f"{Fore.MAGENTA}{Style.BRIGHT}Stopping params sources...{Style.RESET_ALL}") - params_sources_lifetime.stop() - for params_source in params_sources: - params_source.join() - - print(f"{Fore.MAGENTA}{Style.BRIGHT}All threads stopped...{Style.RESET_ALL}") # Measure absolute metric. if config.arch.absolute_metric: @@ -692,6 +688,26 @@ def run_experiment(_config: DictConfig) -> float: # Stop the logger. 
logger.stop() + # Ask actors to stop before running the evaluator + actor_lifetime.stop() + # We clear the pipeline before stopping the actor threads to avoid deadlock + pipe.clear() + print(f"{Fore.RED}{Style.BRIGHT}Pipe cleared: {pipe.qsize()}{Style.RESET_ALL}") + + print(f"{Fore.RED}{Style.BRIGHT}Stopping actor threads...{Style.RESET_ALL}") + for actor in actor_threads: + actor.join() + print(f"{Fore.RED}{Style.BRIGHT}{actor.name} stopped{Style.RESET_ALL}") + + print(f"{Fore.RED}{Style.BRIGHT}Stopping pipeline...{Style.RESET_ALL}") + pipe_lifetime.stop() + pipe.join() + + print(f"{Fore.RED}{Style.BRIGHT}Stopping params sources...{Style.RESET_ALL}") + params_sources_lifetime.stop() + for params_source in params_sources: + params_source.join() + print(f"{Fore.RED}{Style.BRIGHT}All threads stopped...{Style.RESET_ALL}") return eval_performance diff --git a/mava/utils/logger.py b/mava/utils/logger.py index d7af26402..bd090604b 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -178,6 +178,7 @@ def log_stat(self, key: str, value: float, step: int, eval_step: int, event: Log if not self.detailed_logging and not is_main_metric: return + value = value.item() if isinstance(value, (jax.Array, np.ndarray)) else value self.logger[f"{event.value}/{key}"].log(value, step=step) def stop(self) -> None: From 105d796a454a99a4a5d0ab2cbc67f16b33944a25 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Tue, 15 Oct 2024 19:20:50 +0100 Subject: [PATCH 110/139] chore: use orginal rware and lbf --- mava/systems/ppo/sebulba/ff_ippo.py | 12 +++++++----- mava/utils/make_env.py | 3 +-- mava/utils/sebulba.py | 4 ++-- mava/wrappers/__init__.py | 1 - mava/wrappers/episode_metrics.py | 2 +- mava/wrappers/gym.py | 25 +++++-------------------- requirements/requirements.txt | 4 ++-- 7 files changed, 18 insertions(+), 33 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 5208bc312..2daaf30e7 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -146,7 +146,7 @@ def act_fn( try: rollout_queue.put(traj, timestep, actor_timings) except queue.Full: - err = "Waited too long to add to the rollout queue, killing the actor thread" + err = "Waited too long to add to the rollout queue, killing the actor thread" warnings.warn(err, stacklevel=2) break @@ -162,7 +162,6 @@ def get_learner_step_fn( num_agents, num_envs = config.system.num_agents, config.arch.num_envs num_learner_envs = int(num_envs // len(config.arch.learner_device_ids)) - # Get apply and update functions for actor and critic networks. 
actor_apply_fn, critic_apply_fn = apply_fns @@ -205,7 +204,9 @@ def _get_advantages( return advantages, advantages + traj_batch.value # Calculate advantage - last_dones = jnp.repeat(learner_state.timestep.last(), num_agents).reshape(num_learner_envs, -1) + last_dones = jnp.repeat(learner_state.timestep.last(), num_agents).reshape( + num_learner_envs, -1 + ) params, opt_states, key, _, _ = learner_state last_val = critic_apply_fn(params.critic_params, learner_state.timestep.observation) advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) @@ -582,7 +583,9 @@ def run_experiment(_config: DictConfig) -> float: # Unfortunately we have to do this here, because creating envs inside the actor threads causes deadlocks envs = [[] for i in range(len(actor_devices))] - print(f"{Fore.BLUE}{Style.BRIGHT}Starting up environments, this may take a while...{Style.RESET_ALL}") + print( + f"{Fore.BLUE}{Style.BRIGHT}Starting up environments, this may take a while...{Style.RESET_ALL}" + ) for i in range(len(actor_devices)): for _ in range(config.arch.n_threads_per_executor): env = environments.make_gym_env(config, config.arch.num_envs) @@ -672,7 +675,6 @@ def run_experiment(_config: DictConfig) -> float: evaluator_envs.close() eval_performance = float(np.mean(eval_metrics[config.env.eval_metric])) - # Measure absolute metric. if config.arch.absolute_metric: print(f"{Fore.BLUE}{Style.BRIGHT}Measuring absolute metric...{Style.RESET_ALL}") diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index a5010307a..1d71ddce0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -46,7 +46,6 @@ ConnectorWrapper, GigastepWrapper, GymAgentIDWrapper, - GymLBFWrapper, GymRecordEpisodeMetrics, GymToJumanji, GymWrapper, @@ -78,7 +77,7 @@ _gym_registry = { "RobotWarehouse": (gym_Warehouse, GymWrapper), - "LevelBasedForaging": (gym_ForagingEnv, GymLBFWrapper), + "LevelBasedForaging": (gym_ForagingEnv, GymWrapper), } diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index 22753de0c..cead3b6ba 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -13,15 +13,14 @@ # limitations under the License. -from functools import partial import queue import threading import time +from functools import partial from typing import Any, Dict, List, Sequence, Tuple, Union import jax import jax.numpy as jnp -import numpy as np from colorama import Fore, Style from jax import tree from jumanji.types import TimeStep @@ -160,6 +159,7 @@ def split_payload(self, payload: Any, axis: int = 0): def shard_payload(self, payload: Any): return jax.device_put_sharded(payload, devices=self.learner_devices) + class ParamsSource(threading.Thread): """A `ParamSource` is a component that allows networks params to be passed from a `Learner` component to `Actor` components. 
diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index a7b56c5da..f8cf8a64c 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -18,7 +18,6 @@ from mava.wrappers.gigastep import GigastepWrapper from mava.wrappers.gym import ( GymAgentIDWrapper, - GymLBFWrapper, GymRecordEpisodeMetrics, GymToJumanji, GymWrapper, diff --git a/mava/wrappers/episode_metrics.py b/mava/wrappers/episode_metrics.py index 63d65e35e..f4c34002e 100644 --- a/mava/wrappers/episode_metrics.py +++ b/mava/wrappers/episode_metrics.py @@ -17,10 +17,10 @@ import chex import jax import jax.numpy as jnp +import numpy as np from jax import tree from jumanji.types import TimeStep from jumanji.wrappers import Wrapper -import numpy as np from mava.types import MarlEnv, State diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 0b2dff78d..39870b211 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -15,11 +15,11 @@ import sys import traceback import warnings +from dataclasses import field from multiprocessing import Queue from multiprocessing.connection import Connection -from typing import Any, Callable, Dict, Optional, Tuple, Union, NamedTuple, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union -from dataclasses import field import gymnasium import gymnasium.vector.async_vector_env import numpy as np @@ -56,7 +56,6 @@ class TimeStep: observation: Observation extras: Dict = field(default_factory=dict) - def first(self) -> bool: return self.step_type == StepType.FIRST @@ -69,8 +68,7 @@ def last(self) -> bool: class GymWrapper(gymnasium.Wrapper): """Base wrapper for multi-agent gym environments. - This wrapper works out of the box for RobotWarehouse. - See `GymLBFWrapper` for how it can be modified to work for other environments. + This wrapper works out of the box for RobotWarehouse and level based foraging. 
""" def __init__( @@ -131,18 +129,6 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) -class GymLBFWrapper(GymWrapper): - """Wrapper for the gym level based foraging environment.""" - - def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: - agents_view, reward, terminated, truncated, info = super().step(actions) - - truncated = np.repeat(truncated, self.num_agents) - terminated = np.repeat(terminated, self.num_agents) - - return agents_view, reward, terminated, truncated, info - - class GymRecordEpisodeMetrics(gymnasium.Wrapper): """Record the episode returns and lengths.""" @@ -247,7 +233,7 @@ def reset( ep_done = np.zeros(num_envs, dtype=float) rewards = np.zeros((num_envs, num_agents), dtype=float) - teminated = np.zeros((num_envs, num_agents), dtype=float) + teminated = np.zeros(num_envs, dtype=float) timestep = self._create_timestep(obs, ep_done, teminated, rewards, info) @@ -256,7 +242,7 @@ def reset( def step(self, action: list) -> TimeStep: obs, rewards, terminated, truncated, info = self.env.step(action) - ep_done = np.logical_or(terminated, truncated).all(axis=1) + ep_done = np.logical_or(terminated, truncated) timestep = self._create_timestep(obs, ep_done, terminated, rewards, info) @@ -286,7 +272,6 @@ def _create_timestep( # Filter out the masks and auxiliary data extras = {key: value for key, value in info["metrics"].items() if key[0] != "_"} step_type = np.where(ep_done, StepType.LAST, StepType.MID) - terminated = np.all(terminated, axis=1) return TimeStep( step_type=step_type, diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 71432102f..61f7fe68a 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -11,7 +11,7 @@ jax==0.4.30 jaxlib==0.4.30 jaxmarl jumanji @ git+https://github.com/sash-a/jumanji@old_jumanji # Includes a few extra MARL envs -lbforaging @ git+https://github.com/LukasSchaefer/lb-foraging.git@gymnasium_integration # fixes: https://github.com/semitable/lb-foraging/issues/20 +lbforaging matrax @ git+https://github.com/instadeepai/matrax mujoco==3.1.3 mujoco-mjx==3.1.3 @@ -20,7 +20,7 @@ numpy==1.26.4 omegaconf optax protobuf~=3.20 -rware @ git+https://github.com/RuanJohn/robotic-warehouse.git # compatibility with latest gymnasium +rware scipy==1.12.0 tensorboard_logger tensorflow_probability From f292bf303d42e66eb28775bbf6f4a9d52f6f338c Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Wed, 16 Oct 2024 12:48:27 +0200 Subject: [PATCH 111/139] fix: possible off by one fix --- mava/systems/ppo/sebulba/ff_ippo.py | 51 ++++++++++++++--------------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 2daaf30e7..b0a74f716 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -81,6 +81,8 @@ def rollout( num_agents, num_envs = config.system.num_agents, config.arch.num_envs move_to_device = lambda x: jax.device_put(x, device=actor_device) + key = move_to_device(key) + @jax.jit def act_fn( params: Params, @@ -96,7 +98,7 @@ def act_fn( return action, log_prob, value timestep = env.reset(seed=seeds) - next_dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) + dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) # Loop till the desired num_updates is reached. 
while not thread_lifetime.should_stop(): @@ -105,38 +107,33 @@ def act_fn( actor_timings: Dict[str, List[float]] = defaultdict(list) with RecordTimeTo(actor_timings["rollout_time"]): for _ in range(config.system.rollout_length): - # if thread_lifetime.should_stop(): - # env.close() - # return - with RecordTimeTo(actor_timings["get_params_time"]): # Get the latest parameters from the learner params = params_source.get() - cached_next_obs = tree.map(move_to_device, timestep.observation) - cached_next_dones = move_to_device(next_dones) + obs_tpu = tree.map(move_to_device, timestep.observation) # Get action and value with RecordTimeTo(actor_timings["compute_action_time"]): key, act_key = jax.random.split(key) - action, log_prob, value = act_fn(params, cached_next_obs, act_key) + action, log_prob, value = act_fn(params, obs_tpu, act_key) cpu_action = jax.device_get(action) # Step environment with RecordTimeTo(actor_timings["env_step_time"]): timestep = env.step(cpu_action.swapaxes(0, 1)) - next_dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) + dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) # Append data to storage traj.append( PPOTransition( - cached_next_dones, + dones, action, value, timestep.reward, log_prob, - cached_next_obs, + obs_tpu, timestep.extras, ) ) @@ -182,21 +179,24 @@ def _update_step( """ def _calculate_gae( - traj_batch: PPOTransition, last_val: chex.Array, last_done: chex.Array + traj_batch: PPOTransition, last_val: chex.Array ) -> Tuple[chex.Array, chex.Array]: - def _get_advantages( - carry: Tuple[chex.Array, chex.Array, chex.Array], transition: PPOTransition - ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: - gae, next_value, next_done = carry + """Calculate the GAE.""" + + gamma, gae_lambda = config.system.gamma, config.system.gae_lambda + + def _get_advantages(gae_and_next_value: Tuple, transition: PPOTransition) -> Tuple: + """Calculate the GAE for a single transition.""" + gae, next_value = gae_and_next_value done, value, reward = transition.done, transition.value, transition.reward - gamma = config.system.gamma - delta = reward + gamma * next_value * (1 - next_done) - value - gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae - return (gae, value, done), gae + + delta = reward + gamma * next_value * (1 - done) - value + gae = delta + gamma * gae_lambda * (1 - done) * gae + return (gae, value), gae _, advantages = jax.lax.scan( _get_advantages, - (jnp.zeros_like(last_val), last_val, last_done), + (jnp.zeros_like(last_val), last_val), traj_batch, reverse=True, unroll=16, @@ -204,12 +204,9 @@ def _get_advantages( return advantages, advantages + traj_batch.value # Calculate advantage - last_dones = jnp.repeat(learner_state.timestep.last(), num_agents).reshape( - num_learner_envs, -1 - ) - params, opt_states, key, _, _ = learner_state - last_val = critic_apply_fn(params.critic_params, learner_state.timestep.observation) - advantages, targets = _calculate_gae(traj_batch, last_val, last_dones) + params, opt_states, key, _, final_timestep = learner_state + last_val = critic_apply_fn(params.critic_params, final_timestep.observation) + advantages, targets = _calculate_gae(traj_batch, last_val) def _update_epoch(update_state: Tuple, _: Any) -> Tuple: """Update the network for a single epoch.""" From d42d7328bea97c1fd81faf17a1ef296b78385b2e Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Wed, 16 Oct 2024 16:26:05 +0200 Subject: [PATCH 112/139] fix: change to using gym.make to create envs and 
fix StepType --- mava/configs/default/ff_ippo_sebulba.yaml | 2 +- mava/configs/env/lbf_gym.yaml | 6 ++++-- mava/configs/env/rware_gym.yaml | 4 +++- .../env/scenario/gym-lbf-10x10-3p-3f.yaml | 18 ------------------ .../env/scenario/gym-lbf-15x15-3p-5f.yaml | 18 ------------------ .../env/scenario/gym-lbf-15x15-4p-3f.yaml | 18 ------------------ .../env/scenario/gym-lbf-15x15-4p-5f.yaml | 18 ------------------ .../env/scenario/gym-lbf-2s-10x10-3p-3f.yaml | 18 ------------------ .../scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml | 18 ------------------ .../env/scenario/gym-lbf-8x8-2p-2f-coop.yaml | 18 ------------------ .../env/scenario/gym-rware-small-4ag.yaml | 18 ------------------ .../env/scenario/gym-rware-tiny-2ag.yaml | 18 ------------------ .../env/scenario/gym-rware-tiny-4ag-easy.yaml | 18 ------------------ .../env/scenario/gym-rware-tiny-4ag.yaml | 18 ------------------ mava/utils/make_env.py | 12 ++++++------ mava/wrappers/gym.py | 9 ++++++--- 16 files changed, 20 insertions(+), 211 deletions(-) delete mode 100644 mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml delete mode 100644 mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml delete mode 100644 mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml delete mode 100644 mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml delete mode 100644 mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml delete mode 100644 mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml delete mode 100644 mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml delete mode 100644 mava/configs/env/scenario/gym-rware-small-4ag.yaml delete mode 100644 mava/configs/env/scenario/gym-rware-tiny-2ag.yaml delete mode 100644 mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml delete mode 100644 mava/configs/env/scenario/gym-rware-tiny-4ag.yaml diff --git a/mava/configs/default/ff_ippo_sebulba.yaml b/mava/configs/default/ff_ippo_sebulba.yaml index babd113ee..7669049b1 100644 --- a/mava/configs/default/ff_ippo_sebulba.yaml +++ b/mava/configs/default/ff_ippo_sebulba.yaml @@ -3,7 +3,7 @@ defaults: - arch: sebulba - system: ppo/ff_ippo - network: mlp # [mlp, continuous_mlp, cnn] - - env: lbf_gym # [rware_gym, lbf_gym] + - env: rware_gym # [rware_gym, lbf_gym] - _self_ hydra: diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index b6c380c9e..39d624daa 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -1,16 +1,18 @@ # ---Environment Configs--- defaults: - _self_ - - scenario: gym-lbf-8x8-2p-2f-coop # [gym-lbf-2s-8x8-2p-2f-coop, gym-lbf-8x8-2p-2f-coop, gym-lbf-2s-10x10-3p-3f, gym-lbf-10x10-3p-3f, gym-lbf-15x15-3p-5f, gym-lbf-15x15-4p-3f, gym-lbf-15x15-4p-5f] env_name: LevelBasedForaging # Used for logging purposes. +scenario: + name: lbforaging + task_name: Foraging-8x8-2p-1f-v3 # Defines the metric that will be used to evaluate the performance of the agent. # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. eval_metric: episode_return # Whether the add agents IDs to the observations returned by the environment. -add_agent_id : False +add_agent_id: False # Whether or not to log the winrate of this environment. 
log_win_rate: False diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index 87bd3a473..da8c73402 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -1,9 +1,11 @@ # ---Environment Configs--- defaults: - _self_ - - scenario: gym-rware-tiny-2ag # [gym-rware-tiny-2ag, gym-rware-tiny-4ag, gym-rware-tiny-4ag-easy, gym-rware-small-4ag] env_name: RobotWarehouse # Used for logging purposes. +scenario: + name: rware + task_name: rware-tiny-2ag-v2 # [rware-tiny-2ag-v2, rware-tiny-4ag-v2, rware-tiny-4ag-easy-v2, rware-small-4ag-v2] # Defines the metric that will be used to evaluate the performance of the agent. # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. diff --git a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml deleted file mode 100644 index a2150115b..000000000 --- a/mava/configs/env/scenario/gym-lbf-10x10-3p-3f.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 10x10-3p-3f scenario with the VectorObserver set as default -name: LevelBasedForaging -task_name: 10x10-3p-3f - -task_config: - field_size: [10,10] - sight: 10 - players: 3 - max_num_food: 3 - max_player_level: 2 - force_coop: False - max_episode_steps: 100 - min_player_level : 1 - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml deleted file mode 100644 index 70031bad0..000000000 --- a/mava/configs/env/scenario/gym-lbf-15x15-3p-5f.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 15x15-3p-5f scenario with the VectorObserver set as default -name: LevelBasedForaging -task_name: 15x15-3p-5f - -task_config: - field_size: [15, 15] - sight: 15 - players: 3 - max_num_food: 5 - max_player_level: 2 - force_coop: False - max_episode_steps: 100 - min_player_level : 1 - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml deleted file mode 100644 index b1fe6e4be..000000000 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-3f.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 15x15-4p-3f scenario with the VectorObserver set as default -name: LevelBasedForaging -task_name: 15x15-4p-3f - -task_config: - field_size: [15, 15] - sight: 15 - players: 4 - max_num_food: 3 - max_player_level: 2 - force_coop: False - max_episode_steps: 100 - min_player_level : 1 - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml b/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml deleted file mode 100644 index 9ce0100f5..000000000 --- a/mava/configs/env/scenario/gym-lbf-15x15-4p-5f.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 15x15-4p-5f scenario with the VectorObserver set as default -name: LevelBasedForaging -task_name: 15x15-4p-5f - -task_config: - field_size: [15, 15] - sight: 15 - players: 4 - max_num_food: 5 - max_player_level: 2 - force_coop: False - max_episode_steps: 100 - min_player_level : 1 - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git 
a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml b/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml deleted file mode 100644 index fea817887..000000000 --- a/mava/configs/env/scenario/gym-lbf-2s-10x10-3p-3f.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 2s10x10-3p-3f scenario with the VectorObserver set as default -name: LevelBasedForaging -task_name: 2s-10x10-3p-3f - -task_config: - field_size: [10, 10] - sight: 2 - players: 3 - max_num_food: 3 - max_player_level: 2 - force_coop: False - max_episode_steps: 100 - min_player_level : 1 - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml deleted file mode 100644 index b0cacb95c..000000000 --- a/mava/configs/env/scenario/gym-lbf-2s-8x8-2p-2f-coop.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 2s-8x8-2p-2f-coop scenario with the VectorObserver set as default. -name: LevelBasedForaging -task_name: 2s-8x8-2p-2f-coop - -task_config: - field_size: [8, 8] # size of the grid to generate. - sight: 2 # field of view of an agent. - players: 2 # number of agents on the grid. - max_num_food: 2 # number of food in the environment. - max_player_level: 2 # maximum level of the agents (inclusive). - force_coop: True # force cooperation between agents. - max_episode_steps: 100 # max number of steps per episode. - min_player_level : 1 # minimum level of the agents (inclusive). - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml b/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml deleted file mode 100644 index 3b9cee314..000000000 --- a/mava/configs/env/scenario/gym-lbf-8x8-2p-2f-coop.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the 8x8-2p-2f-coop scenario with the VectorObserver set as default -name: LevelBasedForaging -task_name: 8x8-2p-2f-coop - -task_config: - field_size: [8, 8] - sight: 8 - players: 2 - max_num_food: 2 - max_player_level: 2 - force_coop: True - max_episode_steps: 100 - min_player_level : 1 - min_food_level : null - max_food_level : null - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-small-4ag.yaml b/mava/configs/env/scenario/gym-rware-small-4ag.yaml deleted file mode 100644 index 39f8efa4e..000000000 --- a/mava/configs/env/scenario/gym-rware-small-4ag.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the small-4ag environment -name: RobotWarehouse -task_name: small-4ag - -task_config: - column_height: 8 - shelf_rows: 2 - shelf_columns: 3 - n_agents: 4 - sensor_range: 1 - request_queue_size: 4 - msg_bits : 0 - max_inactivity_steps : null - max_steps : 500 - reward_type : 0 - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml deleted file mode 100644 index 95ef11fc2..000000000 --- a/mava/configs/env/scenario/gym-rware-tiny-2ag.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the tiny-2ag environment -name: RobotWarehouse -task_name: tiny-2ag - -task_config: - column_height: 8 - shelf_rows: 1 - shelf_columns: 3 - n_agents: 2 - sensor_range: 1 - request_queue_size: 2 - msg_bits : 0 - max_inactivity_steps : null - max_steps : 500 - reward_type : 
0 - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml deleted file mode 100644 index 7753b73ec..000000000 --- a/mava/configs/env/scenario/gym-rware-tiny-4ag-easy.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the tiny-4ag-easy environment -name: RobotWarehouse -task_name: tiny-4ag-easy - -task_config: - column_height: 8 - shelf_rows: 1 - shelf_columns: 3 - n_agents: 4 - sensor_range: 1 - request_queue_size: 8 - msg_bits : 0 - max_inactivity_steps : null - max_steps : 500 - reward_type : 0 - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml b/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml deleted file mode 100644 index c28cf92c5..000000000 --- a/mava/configs/env/scenario/gym-rware-tiny-4ag.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# The config of the tiny_4ag environment -name: RobotWarehouse -task_name: tiny-4ag - -task_config: - column_height: 8 - shelf_rows: 1 - shelf_columns: 3 - n_agents: 4 - sensor_range: 1 - request_queue_size: 4 - msg_bits : 0 - max_inactivity_steps : null - max_steps : 500 - reward_type : 0 - -env_kwargs: - {} # there are no scenario specific env_kwargs for this env diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 1d71ddce0..1c9e4dbd0 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -15,6 +15,7 @@ from typing import Dict, Tuple, Type import gymnasium +import gymnasium as gym import gymnasium.vector import gymnasium.wrappers import jaxmarl @@ -34,9 +35,7 @@ from jumanji.environments.routing.robot_warehouse.generator import ( RandomGenerator as RwareRandomGenerator, ) -from lbforaging.foraging import ForagingEnv as gym_ForagingEnv from omegaconf import DictConfig -from rware.warehouse import Warehouse as gym_Warehouse from mava.types import MarlEnv from mava.wrappers import ( @@ -76,8 +75,8 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} _gym_registry = { - "RobotWarehouse": (gym_Warehouse, GymWrapper), - "LevelBasedForaging": (gym_ForagingEnv, GymWrapper), + "RobotWarehouse": GymWrapper, + "LevelBasedForaging": GymWrapper, } @@ -243,10 +242,11 @@ def make_gym_env( Returns: Async environments. 
""" - env_maker, wrapper = _gym_registry[config.env.scenario.name] + wrapper = _gym_registry[config.env.env_name] def create_gym_env(config: DictConfig, add_global_state: bool = False) -> gymnasium.Env: - env = env_maker(**config.env.scenario.task_config) + registered_name = f"{config.env.scenario.name}:{config.env.scenario.task_name}" + env = gym.make(registered_name, disable_env_checker=False) wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) if config.env.add_agent_id: wrapped_env = GymAgentIDWrapper(wrapped_env) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 39870b211..a27b246ce 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -16,6 +16,7 @@ import traceback import warnings from dataclasses import field +from enum import IntEnum from multiprocessing import Queue from multiprocessing.connection import Connection from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union @@ -40,7 +41,7 @@ # needed to avoid host -> device transfers when calling TimeStep.last() -class StepType: +class StepType(IntEnum): """Coppy of Jumanji's step type but with numpy arrays""" FIRST = 0 @@ -53,7 +54,7 @@ class TimeStep: step_type: StepType reward: NDArray discount: NDArray - observation: Observation + observation: Union[Observation, ObservationGlobalState] extras: Dict = field(default_factory=dict) def first(self) -> bool: @@ -94,7 +95,9 @@ def __init__( def reset( self, seed: Optional[int] = None, options: Optional[dict] = None ) -> Tuple[NDArray, Dict]: - if seed is not None: + # todo: maybe we should just remove this? I think the hasattr could be slow and the + # `OrderEnforcingWrapper` blocks the seed call :/ + if seed is not None and hasattr(self.env, "seed"): self.env.seed(seed) agents_view, info = self._env.reset() From d4359c1cf6ac91415f8f3ae64a89959b4c317139 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Thu, 17 Oct 2024 13:52:44 +0100 Subject: [PATCH 113/139] feat: learner env accumulation --- mava/configs/arch/sebulba.yaml | 1 + mava/systems/ppo/sebulba/ff_ippo.py | 31 +++++++++++++++++++++-------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index d8f44fd3c..278b0592d 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -18,6 +18,7 @@ absolute_metric: True # Whether the absolute metric should be computed. For more n_threads_per_executor: 2 # num of different threads/env batches per actor actor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices +n_learner_accumulate: 1 # Number of envoirnments to accumulate before updating the parameters. This determines the num_envs for learning updates which equals (num_envs * n_learner_accumulate) / len(learner_device_ids). rollout_queue_size : 5 # The size of the pipeline queue determines the extent of off-policy training allowed. A larger value permits more off-policy training. 
# Too large of a value with too many actors will lead to all of the updates getting wasted in old episodes diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index b0a74f716..a0026d95c 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -396,23 +396,38 @@ def learner_thread( with RecordTimeTo(learn_times["learner_time_per_eval"]): for _ in range(config.system.num_updates_per_eval): - # Get the trajectory batch from the pipeline - # This is blocking so it will wait until the pipeline has data. - with RecordTimeTo(learn_times["rollout_get_time"]): - traj_batch, timestep, rollout_time = pipeline.get(block=True) + # Accumulate the batches, timesteps, and rollout times + accumulated_traj_batches = [] + accumulated_timesteps = [] + + for _ in range(config.arch.n_learner_accumulate): + # Get the trajectory batch from the pipeline + # This is blocking so it will wait until the pipeline has data. + with RecordTimeTo(learn_times["rollout_get_time"]): + traj_batch, timestep, rollout_time = pipeline.get(block=True) + + # Store the retrieved data + accumulated_traj_batches.append(traj_batch) + accumulated_timesteps.append(timestep) + rollout_times.append(rollout_time) + + # Concatenate accumulated timesteps and trajectory batches on the num_envs axis + combined_traj_batch = jax.tree.map(lambda *x: jnp.concat(x, axis=2), *accumulated_traj_batches) + combined_timesteps = jax.tree.map(lambda *x: jnp.concat(x, axis=1), *accumulated_timesteps) + # Replace the timestep in the learner state with the latest timestep # This means the learner has access to the entire trajectory as well as # an additional timestep which it can use to bootstrap. - learner_state = learner_state._replace(timestep=timestep) + learner_state = learner_state._replace(timestep=combined_timesteps) # Update the networks with RecordTimeTo(learn_times["learning_time"]): learner_state, episode_metrics, train_metrics = learn_fn( - learner_state, traj_batch + learner_state, combined_traj_batch ) - + metrics.append((episode_metrics, train_metrics)) - rollout_times.append(rollout_time) + # Update all the params sources so all actors can get the latest params unreplicated_params = unreplicate(learner_state.params) From 7c784788ba6e7f59f27f8361a91c52de43bd03ed Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Thu, 17 Oct 2024 14:07:17 +0000 Subject: [PATCH 114/139] feat: jit evaluation on cpu --- mava/evaluator.py | 2 ++ mava/systems/ppo/sebulba/ff_ippo.py | 19 ++++++------------- mava/wrappers/gym.py | 6 ++---- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index a306157ed..99d4eb8d4 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -239,6 +239,8 @@ def get_sebulba_eval_fn( episode_loops = math.ceil(eval_episodes / n_parallel_envs) env = env_maker(config, n_parallel_envs) + act_fn = jax.jit(act_fn, device=jax.devices('cpu')[0]) # cpu so that we don't block actors/learners + # Warnings if num eval episodes is not divisible by num parallel envs. 
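As a concrete illustration of the divisibility check below (numbers are made up):

import math

eval_episodes, n_parallel_envs = 32, 10
episode_loops = math.ceil(eval_episodes / n_parallel_envs)  # 4 sequential rounds of the eval envs
episodes_run = episode_loops * n_parallel_envs              # 40 episodes, more than the 32 requested

Hence the warning that follows whenever the requested episode count is not a multiple of the number of parallel eval environments.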
if eval_episodes % n_parallel_envs != 0: warnings.warn( diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index b0a74f716..1f5aad316 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -50,7 +50,6 @@ from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics - def rollout( key: chex.PRNGKey, env, @@ -81,8 +80,6 @@ def rollout( num_agents, num_envs = config.system.num_agents, config.arch.num_envs move_to_device = lambda x: jax.device_put(x, device=actor_device) - key = move_to_device(key) - @jax.jit def act_fn( params: Params, @@ -579,6 +576,7 @@ def run_experiment(_config: DictConfig) -> float: params_sources_lifetime = ThreadLifetime() # Unfortunately we have to do this here, because creating envs inside the actor threads causes deadlocks + # todo: see what happens if we do this in the thread creating loop envs = [[] for i in range(len(actor_devices))] print( f"{Fore.BLUE}{Style.BRIGHT}Starting up environments, this may take a while...{Style.RESET_ALL}" @@ -633,7 +631,7 @@ def run_experiment(_config: DictConfig) -> float: # Acting and learning is happening in their own threads. # This loop waits for the learner to finish an update before evaluation and logging. for eval_step in range(config.arch.num_evaluation): - # Get the next set of params and metrics from the learner + # Sync with the learner - the get() is blocking so it keeps eval and learning in step. episode_metrics, train_metrics, learner_state, time_metrics = eval_queue.get() t = int(steps_per_rollout * (eval_step + 1)) @@ -653,7 +651,7 @@ def run_experiment(_config: DictConfig) -> float: unreplicated_actor_params = unreplicate(learner_state.params.actor_params) key, eval_key = jax.random.split(key, 2) - eval_metrics = evaluator(unreplicated_actor_params, eval_key, {}) + eval_metrics = evaluator(jax.device_get(unreplicated_actor_params), eval_key, {}) logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) episode_return = np.mean(eval_metrics["episode_return"]) @@ -685,23 +683,18 @@ def run_experiment(_config: DictConfig) -> float: logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) abs_metric_evaluator_envs.close() - # Stop the logger. + # Stop all the threads. logger.stop() - # Ask actors to stop before running the evaluator actor_lifetime.stop() - # We clear the pipeline before stopping the actor threads to avoid deadlock - pipe.clear() - print(f"{Fore.RED}{Style.BRIGHT}Pipe cleared: {pipe.qsize()}{Style.RESET_ALL}") - + pipe.clear() # We clear the pipeline before stopping the actor threads to avoid deadlock + print(f"{Fore.RED}{Style.BRIGHT}Pipe cleared{Style.RESET_ALL}") print(f"{Fore.RED}{Style.BRIGHT}Stopping actor threads...{Style.RESET_ALL}") for actor in actor_threads: actor.join() print(f"{Fore.RED}{Style.BRIGHT}{actor.name} stopped{Style.RESET_ALL}") - print(f"{Fore.RED}{Style.BRIGHT}Stopping pipeline...{Style.RESET_ALL}") pipe_lifetime.stop() pipe.join() - print(f"{Fore.RED}{Style.BRIGHT}Stopping params sources...{Style.RESET_ALL}") params_sources_lifetime.stop() for params_source in params_sources: diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index a27b246ce..048294893 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -95,10 +95,8 @@ def __init__( def reset( self, seed: Optional[int] = None, options: Optional[dict] = None ) -> Tuple[NDArray, Dict]: - # todo: maybe we should just remove this? 
I think the hasattr could be slow and the - # `OrderEnforcingWrapper` blocks the seed call :/ - if seed is not None and hasattr(self.env, "seed"): - self.env.seed(seed) + if seed is not None: + self.env.unwrapped.seed(seed) agents_view, info = self._env.reset() From c252ffeffa7169b378638cdd64604de29966e5e5 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Thu, 17 Oct 2024 15:13:48 +0100 Subject: [PATCH 115/139] fix: timestep calculation with accumulation --- mava/systems/ppo/sebulba/ff_ippo.py | 2 +- mava/utils/config.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 95566efea..639ff1fe0 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -559,7 +559,7 @@ def run_experiment(_config: DictConfig) -> float: check_sebulba_config(config) steps_per_rollout = ( - config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval + config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval * config.arch.n_learner_accumulate ) # Logger setup diff --git a/mava/utils/config.py b/mava/utils/config.py index 23484311b..34a35f091 100644 --- a/mava/utils/config.py +++ b/mava/utils/config.py @@ -46,9 +46,11 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: if config.arch.architecture_name == "anakin": n_devices = len(jax.devices()) update_batch_size = config.system.update_batch_size + n_accumulate = 1 # We dont accumulate envs in anakin else: n_devices = 1 # We only use a single device's output when updating. update_batch_size = 1 + n_accumulate = config.arch.n_learner_accumulate if config.system.total_timesteps is None: config.system.num_updates = int(config.system.num_updates) @@ -58,6 +60,7 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: * config.system.rollout_length * update_batch_size * config.arch.num_envs + * n_accumulate ) else: config.system.total_timesteps = int(config.system.total_timesteps) @@ -67,6 +70,7 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: // update_batch_size // config.arch.num_envs // n_devices + // n_accumulate ) print( f"{Fore.RED}{Style.BRIGHT} Changing the number of updates " From fd7a0255d45b53691b486e39f1f59ace058a6bf7 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Thu, 17 Oct 2024 20:56:56 +0000 Subject: [PATCH 116/139] feat: shardmap almost working --- mava/systems/ppo/sebulba/ff_ippo.py | 25 +++++++++++++++++++---- mava/utils/sebulba.py | 31 ++++++++++++++--------------- 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 639ff1fe0..e47a91c87 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -26,6 +26,10 @@ import jax import jax.debug import jax.numpy as jnp +from jax.sharding import Mesh, PartitionSpec as P +from jax.sharding import NamedSharding +from jax.experimental import mesh_utils +from jax.experimental.shard_map import shard_map import numpy as np import optax from colorama import Fore, Style @@ -409,8 +413,8 @@ def learner_thread( rollout_times.append(rollout_time) # Concatenate accumulated timesteps and trajectory batches on the num_envs axis - combined_traj_batch = jax.tree.map(lambda *x: jnp.concat(x, axis=2), *accumulated_traj_batches) - combined_timesteps = jax.tree.map(lambda *x: jnp.concat(x, axis=1), *accumulated_timesteps) + combined_traj_batch = jax.tree.map(lambda *x: 
jnp.concat(x, axis=0), *accumulated_traj_batches) + combined_timesteps = jax.tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_timesteps) # Replace the timestep in the learner state with the latest timestep @@ -454,6 +458,9 @@ def learner_setup( config.system.num_agents = len(action_space) config.system.num_actions = int(action_space[0].n) + devices = mesh_utils.create_device_mesh((len(learner_devices),), devices=learner_devices) + mesh = Mesh(devices, axis_names=("learner_devices",)) + # PRNG keys. key, actor_key, critic_key = jax.random.split(key, 3) @@ -500,7 +507,13 @@ def learner_setup( update_fns = (actor_optim.update, critic_optim.update) learn = get_learner_step_fn(apply_fns, update_fns, config) - learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) + learn = jax.jit( + shard_map(learn, + mesh=mesh, + in_specs=P("learner_devices"), + out_specs=P("learner_devices")) + ) + # learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) # Load model from checkpoint if specified. if config.logger.checkpointing.load_model: @@ -581,8 +594,12 @@ def run_experiment(_config: DictConfig) -> float: inital_params = unreplicate(learner_state.params) # the rollout queue/ the pipe between actor and learner + # todo: return this from/pass into: learner setup + devices = mesh_utils.create_device_mesh((len(learner_devices),), devices=learner_devices) + mesh = Mesh(devices, axis_names=("learner_devices",)) + sharding = NamedSharding(mesh, P("learner_devices")) pipe_lifetime = ThreadLifetime() - pipe = Pipeline(config.arch.rollout_queue_size, learner_devices, pipe_lifetime) + pipe = Pipeline(config.arch.rollout_queue_size, sharding, pipe_lifetime) pipe.start() params_sources: List[ParamsSource] = [] diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index cead3b6ba..e2c07cf79 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -21,6 +21,7 @@ import jax import jax.numpy as jnp +from jax.sharding import Sharding from colorama import Fore, Style from jax import tree from jumanji.types import TimeStep @@ -28,7 +29,7 @@ # todo: remove the ppo dependencies from mava.systems.ppo.types import Params, PPOTransition -QUEUE_PUT_TIMEOUT = 180 +QUEUE_PUT_TIMEOUT = 100 class ThreadLifetime: @@ -48,29 +49,29 @@ def stop(self) -> None: def _stack_trajectory(trajectory: List[PPOTransition]) -> PPOTransition: """Stack a list of parallel_env transitions into a single transition of shape [rollout_len, num_envs, ...].""" - return tree.map(lambda *x: jnp.stack(x, axis=0), *trajectory) # type: ignore + return tree.map(lambda *x: jnp.stack(x, axis=0).swapaxes(0, 1), *trajectory) # type: ignore # Modified from https://github.com/instadeepai/sebulba/blob/main/sebulba/core.py class Pipeline(threading.Thread): """ - The `Pipeline` shards trajectories into `learner_devices`, + The `Pipeline` shards trajectories into learner devices, ensuring trajectories are consumed in the right order to avoid being off-policy and limit the max number of samples in device memory at one time to avoid OOM issues. """ - def __init__(self, max_size: int, learner_devices: List[jax.Device], lifetime: ThreadLifetime): + def __init__(self, max_size: int, learner_sharding: Sharding, lifetime: ThreadLifetime): """ Initializes the pipeline with a maximum size and the devices to shard trajectories across. Args: max_size: The maximum number of trajectories to keep in the pipeline. - learner_devices: The devices to shard trajectories across. 
+ learner_sharding: The sharding used for the learner's update function. lifetime: A `ThreadLifetime` which is used to stop this thread. """ super().__init__(name="Pipeline") - self.learner_devices = learner_devices + self.sharding = learner_sharding self.tickets_queue: queue.Queue = queue.Queue() self._queue: queue.Queue = queue.Queue(maxsize=max_size) self.lifetime = lifetime @@ -97,22 +98,17 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict self.tickets_queue.put((start_condition, end_condition)) start_condition.wait() # wait to be allowed to start - # [Transition(num_envs)] * rollout_len --> Transition[done=(rollout_len, num_envs, ...)] + # [Transition(num_envs)] * rollout_len -> Transition[done=(num_envs, rollout_len, ...)] traj = _stack_trajectory(traj) - # Split trajectory on the num envs axis so each learner device gets a valid full rollout - sharded_traj = jax.tree.map(lambda x: self.shard_split_playload(x, axis=1), traj) + sharded_traj, sharded_timestep = jax.device_put((traj, timestep), device=self.sharding, donate=True) - # Timestep[(num_envs, num_agents, ...), ...] --> - # [(num_envs / num_learner_devices, num_agents, ...)] * num_learner_devices - sharded_timestep = jax.tree.map(self.shard_split_playload, timestep) - - # We block on the put to ensure that actors wait for the learners to catch up. This does two - # things: + # We block on the put to ensure that actors wait for the learners to catch up. + # This does two things: # 1. It ensures that the actors don't get too far ahead of the learners, which could lead to # off-policy data. # 2. It ensures that the actors don't in a sense "waste" samples and their time by # generating samples that the learners can't consume. - # However, we put a timeout of 180 seconds to avoid deadlocks in case the learner + # However, we put a timeout of 100 seconds to avoid deadlocks in case the learner # is not consuming the data. This is a safety measure and should not be hit in normal # operation. We use a try-finally since the lock has to be released even if an exception # is raised. @@ -149,6 +145,9 @@ def clear(self) -> None: except queue.Empty: break + def shard(self, payload: Any): + ... 
+ def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: return self.shard_payload(self.split_payload(payload, axis)) From 4013a22fc41b46b7e8e417e62f7cdb4a0e1b68c6 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Fri, 18 Oct 2024 14:17:50 +0000 Subject: [PATCH 117/139] feat: shard_map working --- mava/systems/ppo/sebulba/ff_ippo.py | 44 +++++++++++++++++------------ mava/utils/sebulba.py | 28 ++++-------------- 2 files changed, 32 insertions(+), 40 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index e47a91c87..c6e34a7db 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -109,8 +109,7 @@ def act_fn( with RecordTimeTo(actor_timings["rollout_time"]): for _ in range(config.system.rollout_length): with RecordTimeTo(actor_timings["get_params_time"]): - # Get the latest parameters from the learner - params = params_source.get() + params = params_source.get() # Get the latest parameters from the learner obs_tpu = tree.map(move_to_device, timestep.observation) @@ -320,6 +319,7 @@ def _critic_loss_fn( "actor_loss": actor_loss, "entropy": entropy, } + # todo: don't return ent key, only pass in return (new_params, new_opt_state, entropy_key), loss_info params, opt_states, traj_batch, advantages, targets, key = update_state @@ -353,6 +353,7 @@ def _critic_loss_fn( metric = traj_batch.info return learner_state, (metric, loss_info) + # todo: shardmap decorator here? def learner_fn( learner_state: LearnerState, traj_batch: PPOTransition ) -> ExperimentOutput[LearnerState]: @@ -370,6 +371,9 @@ def learner_fn( - env_state (LogEnvState): The environment state. - timesteps (TimeStep): The last timestep of the rollout. """ + # This function is shard mapped on the batch axis, but `_update_step` needs + # the first axis to be time + traj_batch = tree.map(lambda x: x.swapaxes(0, 1), traj_batch) learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch) return ExperimentOutput( @@ -431,9 +435,8 @@ def learner_thread( # Update all the params sources so all actors can get the latest params - unreplicated_params = unreplicate(learner_state.params) for source in params_sources: - source.update(unreplicated_params) + source.update(learner_state.params) # Pass all the metrics and params to the main thread (evaluator) for logging and evaluation episode_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) @@ -460,6 +463,10 @@ def learner_setup( devices = mesh_utils.create_device_mesh((len(learner_devices),), devices=learner_devices) mesh = Mesh(devices, axis_names=("learner_devices",)) + model_spec = P() + data_spec = P("learner_devices",) + model_sharding = NamedSharding(mesh, model_spec) # todo: return these + data_sharding = NamedSharding(mesh, data_spec) # PRNG keys. 
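A small sketch of what the two specs just added mean in practice, reusing the mesh axis name from this diff (shapes are illustrative): P() replicates a value onto every device in the mesh, while P("learner_devices") splits the leading axis across them.

import jax
import jax.numpy as jnp
from jax.experimental import mesh_utils
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

devices = mesh_utils.create_device_mesh((jax.device_count(),))
mesh = Mesh(devices, axis_names=("learner_devices",))
replicated = NamedSharding(mesh, P())                 # full copy on every device (params, opt states)
sharded = NamedSharding(mesh, P("learner_devices"))   # leading axis split across devices (batch data)

x = jnp.zeros((2 * jax.device_count(), 4))
x_rep = jax.device_put(x, replicated)  # each device holds the whole array
x_shd = jax.device_put(x, sharded)     # each device holds a (2, 4) slice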
key, actor_key, critic_key = jax.random.split(key, 3) @@ -506,12 +513,15 @@ def learner_setup( apply_fns = (actor_network.apply, critic_network.apply) update_fns = (actor_optim.update, critic_optim.update) + learn_state_spec = LearnerState(model_spec, model_spec, model_spec, None, data_spec) learn = get_learner_step_fn(apply_fns, update_fns, config) learn = jax.jit( - shard_map(learn, - mesh=mesh, - in_specs=P("learner_devices"), - out_specs=P("learner_devices")) + shard_map( + learn, + mesh=mesh, + in_specs=(learn_state_spec, data_spec), + out_specs=ExperimentOutput(learn_state_spec, data_spec, data_spec), + ) ) # learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) @@ -529,13 +539,11 @@ def learner_setup( # Define params to be replicated across devices and batches. key, step_keys = jax.random.split(key) opt_states = OptStates(actor_opt_state, critic_opt_state) - replicate_learner = (params, opt_states, step_keys) # Duplicate learner across Learner devices. - replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=learner_devices) + params, opt_states, step_keys = jax.device_put((params, opt_states, step_keys), model_sharding) # Initialise learner state. - params, opt_states, step_keys = replicate_learner init_learner_state = LearnerState(params, opt_states, step_keys, None, None) env.close() @@ -591,7 +599,7 @@ def run_experiment(_config: DictConfig) -> float: ) # Executor setup and launch. - inital_params = unreplicate(learner_state.params) + inital_params = jax.device_put(learner_state.params, actor_devices[0]) # unreplicate # the rollout queue/ the pipe between actor and learner # todo: return this from/pass into: learner setup @@ -657,7 +665,7 @@ def run_experiment(_config: DictConfig) -> float: ).start() max_episode_return = -np.inf - best_params = inital_params.actor_params + best_params_cpu = jax.device_get(inital_params.actor_params) # This is the main loop, all it does is evaluation and logging. # Acting and learning is happening in their own threads. 
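The evaluation hunks that follow pull the learner state back to the host with jax.device_get before calling the evaluator and checkpointer, mirroring the CPU-jitted act_fn added to the evaluator earlier in this series. A minimal sketch of that handoff, with stand-in params and policy:

import jax
import jax.numpy as jnp

cpu = jax.devices("cpu")[0]
accel = jax.devices()[0]

params = {"w": jnp.ones((4, 2))}  # stand-in for the actor params

def act(p, obs):                  # stand-in for the policy apply function
    return obs @ p["w"]

params_on_actor = jax.device_put(params, accel)    # single-device copy handed to an actor thread
params_on_host = jax.device_get(params_on_actor)   # numpy copy used for evaluation and checkpointing
eval_act = jax.jit(act, device=cpu)                # evaluation compiled for CPU, off the accelerator
actions = eval_act(params_on_host, jnp.ones((3, 4)))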
@@ -681,9 +689,9 @@ def run_experiment(_config: DictConfig) -> float: ) / time_metrics["learner_time_per_eval"] logger.log(train_metrics, t, eval_step, LogEvent.TRAIN) - unreplicated_actor_params = unreplicate(learner_state.params.actor_params) + learner_state_cpu = jax.device_get(learner_state) key, eval_key = jax.random.split(key, 2) - eval_metrics = evaluator(jax.device_get(unreplicated_actor_params), eval_key, {}) + eval_metrics = evaluator(learner_state_cpu.params.actor_params, eval_key, {}) logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) episode_return = np.mean(eval_metrics["episode_return"]) @@ -691,12 +699,12 @@ def run_experiment(_config: DictConfig) -> float: if save_checkpoint: # Save a checkpoint of the learner state checkpointer.save( timestep=steps_per_rollout * (eval_step + 1), - unreplicated_learner_state=unreplicate_n_dims(learner_state), + unreplicated_learner_state=learner_state_cpu, episode_return=episode_return, ) if config.arch.absolute_metric and max_episode_return <= episode_return: - best_params = copy.deepcopy(unreplicated_actor_params) + best_params_cpu = copy.deepcopy(learner_state_cpu.params.actor_params) max_episode_return = episode_return evaluator_envs.close() @@ -709,7 +717,7 @@ def run_experiment(_config: DictConfig) -> float: environments.make_gym_env, eval_act_fn, config, np_rng, absolute_metric=True ) key, eval_key = jax.random.split(key, 2) - eval_metrics = abs_metric_evaluator(best_params, eval_key, {}) + eval_metrics = abs_metric_evaluator(best_params_cpu, eval_key, {}) t = int(steps_per_rollout * (eval_step + 1)) logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index e2c07cf79..4b1b9f758 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -102,16 +102,13 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict traj = _stack_trajectory(traj) sharded_traj, sharded_timestep = jax.device_put((traj, timestep), device=self.sharding, donate=True) - # We block on the put to ensure that actors wait for the learners to catch up. - # This does two things: - # 1. It ensures that the actors don't get too far ahead of the learners, which could lead to - # off-policy data. - # 2. It ensures that the actors don't in a sense "waste" samples and their time by - # generating samples that the learners can't consume. + # We block on the `put` to ensure that actors wait for the learners to catch up. + # This ensures two things: + # The actors don't get too far ahead of the learners, which could lead to off-policy data. + # The actors don't "waste" samples by generating samples that the learners can't consume. # However, we put a timeout of 100 seconds to avoid deadlocks in case the learner - # is not consuming the data. This is a safety measure and should not be hit in normal - # operation. We use a try-finally since the lock has to be released even if an exception - # is raised. + # is not consuming the data. This is a safety measure and should not occur in normal + # operation. We use a try-finally so the lock is released even if an exception is raised. try: self._queue.put( (sharded_traj, sharded_timestep, time_dict), @@ -145,19 +142,6 @@ def clear(self) -> None: except queue.Empty: break - def shard(self, payload: Any): - ... 
- - def shard_split_playload(self, payload: Any, axis: int = 0) -> Any: - return self.shard_payload(self.split_payload(payload, axis)) - - @partial(jax.jit, static_argnums=(0, 2)) - def split_payload(self, payload: Any, axis: int = 0): - return jnp.split(payload, len(self.learner_devices), axis=axis) - - def shard_payload(self, payload: Any): - return jax.device_put_sharded(payload, devices=self.learner_devices) - class ParamsSource(threading.Thread): """A `ParamSource` is a component that allows networks params to be passed from a From 0e559d99e7deb4c3e1b56745f3cabc447516d103 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Sat, 19 Oct 2024 13:56:55 +0200 Subject: [PATCH 118/139] fix: key use in actor loss --- mava/systems/ppo/sebulba/ff_ippo.py | 32 ++++++++++++++--------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index c6e34a7db..a139fb77c 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -21,21 +21,19 @@ from typing import Any, Dict, List, Sequence, Tuple import chex -import flax import hydra import jax import jax.debug import jax.numpy as jnp -from jax.sharding import Mesh, PartitionSpec as P -from jax.sharding import NamedSharding -from jax.experimental import mesh_utils -from jax.experimental.shard_map import shard_map import numpy as np import optax from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict -from flax.jax_utils import unreplicate from jax import tree +from jax.experimental import mesh_utils +from jax.experimental.shard_map import shard_map +from jax.sharding import Mesh, NamedSharding +from jax.sharding import PartitionSpec as P from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint @@ -44,19 +42,20 @@ from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition -from mava.types import ActorApply, CriticApply, ExperimentOutput, Observation, SebulbaLearnerFn +from mava.types import ActorApply, CriticApply, ExperimentOutput, MarlEnv, Observation, SebulbaLearnerFn from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.config import check_sebulba_config, check_total_timesteps -from mava.utils.jax_utils import merge_leading_dims, unreplicate_n_dims +from mava.utils.jax_utils import merge_leading_dims from mava.utils.logger import LogEvent, MavaLogger from mava.utils.sebulba import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics + def rollout( key: chex.PRNGKey, - env, + env: MarlEnv, config: DictConfig, rollout_queue: Pipeline, params_source: ParamsSource, @@ -319,8 +318,7 @@ def _critic_loss_fn( "actor_loss": actor_loss, "entropy": entropy, } - # todo: don't return ent key, only pass in - return (new_params, new_opt_state, entropy_key), loss_info + return (new_params, new_opt_state, key), loss_info params, opt_states, traj_batch, advantages, targets, key = update_state key, shuffle_key, entropy_key = jax.random.split(key, 3) @@ -335,7 +333,7 @@ def _critic_loss_fn( shuffled_batch, ) # Update minibatches - (params, opt_states, entropy_key), loss_info = jax.lax.scan( + (params, opt_states, _), loss_info = jax.lax.scan( _update_minibatch, (params, opt_states, entropy_key), 
minibatches ) @@ -430,9 +428,9 @@ def learner_thread( learner_state, episode_metrics, train_metrics = learn_fn( learner_state, combined_traj_batch ) - + metrics.append((episode_metrics, train_metrics)) - + # Update all the params sources so all actors can get the latest params for source in params_sources: @@ -517,9 +515,9 @@ def learner_setup( learn = get_learner_step_fn(apply_fns, update_fns, config) learn = jax.jit( shard_map( - learn, - mesh=mesh, - in_specs=(learn_state_spec, data_spec), + learn, + mesh=mesh, + in_specs=(learn_state_spec, data_spec), out_specs=ExperimentOutput(learn_state_spec, data_spec, data_spec), ) ) From 0a6bd49beb37d9e79896faef6b5abbaba2612c0e Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Sat, 19 Oct 2024 13:58:01 +0200 Subject: [PATCH 119/139] fix: align gym config with other configs --- mava/configs/env/lbf_gym.yaml | 9 +++++---- mava/configs/env/rware_gym.yaml | 9 +++++---- mava/utils/make_env.py | 3 ++- mava/utils/sebulba.py | 5 ++--- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index 39d624daa..a7fa1be89 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -11,10 +11,11 @@ scenario: # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. eval_metric: episode_return -# Whether the add agents IDs to the observations returned by the environment. -add_agent_id: False - -# Whether or not to log the winrate of this environment. +# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. +# This should not be changed. +implicit_agent_id: False +# Whether or not to log the winrate of this environment. This should not be changed as not all +# environments have a winrate metric. log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index da8c73402..d3d6a49b2 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -11,10 +11,11 @@ scenario: # This metric is returned at the end of an experiment and can be used for hyperparameter tuning. eval_metric: episode_return -# Whether the add agents IDs to the observations returned by the environment. -add_agent_id : False - -# Whether or not to log the winrate of this environment. +# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. +# This should not be changed. +implicit_agent_id: False +# Whether or not to log the winrate of this environment. This should not be changed as not all +# environments have a winrate metric. log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 1c9e4dbd0..8b9c85afd 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -243,12 +243,13 @@ def make_gym_env( Async environments. 
""" wrapper = _gym_registry[config.env.env_name] + config.system.add_agent_id = config.system.add_agent_id & (~config.env.implicit_agent_id) def create_gym_env(config: DictConfig, add_global_state: bool = False) -> gymnasium.Env: registered_name = f"{config.env.scenario.name}:{config.env.scenario.task_name}" env = gym.make(registered_name, disable_env_checker=False) wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) - if config.env.add_agent_id: + if config.system.add_agent_id: wrapped_env = GymAgentIDWrapper(wrapped_env) wrapped_env = GymRecordEpisodeMetrics(wrapped_env) return wrapped_env diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index 4b1b9f758..4083155d5 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -16,14 +16,13 @@ import queue import threading import time -from functools import partial from typing import Any, Dict, List, Sequence, Tuple, Union import jax import jax.numpy as jnp -from jax.sharding import Sharding from colorama import Fore, Style from jax import tree +from jax.sharding import Sharding from jumanji.types import TimeStep # todo: remove the ppo dependencies @@ -102,7 +101,7 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict traj = _stack_trajectory(traj) sharded_traj, sharded_timestep = jax.device_put((traj, timestep), device=self.sharding, donate=True) - # We block on the `put` to ensure that actors wait for the learners to catch up. + # We block on the `put` to ensure that actors wait for the learners to catch up. # This ensures two things: # The actors don't get too far ahead of the learners, which could lead to off-policy data. # The actors don't "waste" samples by generating samples that the learners can't consume. From 641a548905455874959e9e84a100449d7f24a064 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Sat, 19 Oct 2024 14:54:08 +0200 Subject: [PATCH 120/139] feat: better env creation and safer sharding --- mava/systems/ppo/sebulba/ff_ippo.py | 93 ++++++++++++++--------------- mava/utils/jax_utils.py | 3 +- mava/utils/sebulba.py | 12 ++-- mava/wrappers/jaxmarl.py | 1 - 4 files changed, 52 insertions(+), 57 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index a139fb77c..2312fb023 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -26,13 +26,14 @@ import jax.debug import jax.numpy as jnp import numpy as np +from numpy.typing import NDArray import optax from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict from jax import tree from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map -from jax.sharding import Mesh, NamedSharding +from jax.sharding import Mesh, NamedSharding, Sharding from jax.sharding import PartitionSpec as P from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint @@ -42,11 +43,18 @@ from mava.networks import FeedForwardActor as Actor from mava.networks import FeedForwardValueNet as Critic from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition -from mava.types import ActorApply, CriticApply, ExperimentOutput, MarlEnv, Observation, SebulbaLearnerFn +from mava.types import ( + ActorApply, + CriticApply, + ExperimentOutput, + MarlEnv, + Observation, + SebulbaLearnerFn, +) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.config import check_sebulba_config, check_total_timesteps -from 
mava.utils.jax_utils import merge_leading_dims +from mava.utils.jax_utils import merge_leading_dims, switch_leading_axes from mava.utils.logger import LogEvent, MavaLogger from mava.utils.sebulba import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.training import make_learning_rate @@ -351,7 +359,6 @@ def _critic_loss_fn( metric = traj_batch.info return learner_state, (metric, loss_info) - # todo: shardmap decorator here? def learner_fn( learner_state: LearnerState, traj_batch: PPOTransition ) -> ExperimentOutput[LearnerState]: @@ -371,7 +378,7 @@ def learner_fn( """ # This function is shard mapped on the batch axis, but `_update_step` needs # the first axis to be time - traj_batch = tree.map(lambda x: x.swapaxes(0, 1), traj_batch) + traj_batch = tree.map(switch_leading_axes, traj_batch) learner_state, (episode_info, loss_info) = _update_step(learner_state, traj_batch) return ExperimentOutput( @@ -403,6 +410,7 @@ def learner_thread( accumulated_traj_batches = [] accumulated_timesteps = [] + # Possibly get many rollouts for 1 learn step - allows learning with large batches for _ in range(config.arch.n_learner_accumulate): # Get the trajectory batch from the pipeline # This is blocking so it will wait until the pipeline has data. @@ -414,43 +422,42 @@ def learner_thread( accumulated_timesteps.append(timestep) rollout_times.append(rollout_time) - # Concatenate accumulated timesteps and trajectory batches on the num_envs axis - combined_traj_batch = jax.tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_traj_batches) - combined_timesteps = jax.tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_timesteps) - + # Concatenate the accumulated timesteps and trajectory batches on the num_envs axis + traj_batches = tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_traj_batches) + timesteps = tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_timesteps) # Replace the timestep in the learner state with the latest timestep # This means the learner has access to the entire trajectory as well as # an additional timestep which it can use to bootstrap. 
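The concatenation above merges several pipeline items on the env axis before the learner state below is given the final timestep to bootstrap from. A toy version of that merge, assuming each item is a pytree whose leaves carry a leading num_envs axis (names and shapes are illustrative):

import jax.numpy as jnp
from jax import tree

batch_a = {"obs": jnp.zeros((16, 128, 5)), "reward": jnp.zeros((16, 128))}  # [num_envs, rollout_len, ...]
batch_b = {"obs": jnp.ones((16, 128, 5)), "reward": jnp.ones((16, 128))}

combined = tree.map(lambda *x: jnp.concatenate(x, axis=0), batch_a, batch_b)
assert combined["obs"].shape == (32, 128, 5)  # two rollouts stacked on the env axis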
- learner_state = learner_state._replace(timestep=combined_timesteps) + learner_state = learner_state._replace(timestep=timesteps) # Update the networks with RecordTimeTo(learn_times["learning_time"]): - learner_state, episode_metrics, train_metrics = learn_fn( - learner_state, combined_traj_batch - ) - - metrics.append((episode_metrics, train_metrics)) + learner_state, ep_metrics, train_metrics = learn_fn(learner_state, traj_batches) + metrics.append((ep_metrics, train_metrics)) # Update all the params sources so all actors can get the latest params for source in params_sources: source.update(learner_state.params) # Pass all the metrics and params to the main thread (evaluator) for logging and evaluation - episode_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) - rollout_times = tree.map(lambda *x: np.mean(x), *rollout_times) + ep_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) + rollout_times: Dict[str, NDArray] = tree.map(lambda *x: np.mean(x), *rollout_times) timing_dict = rollout_times | learn_times timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) - eval_queue.put((episode_metrics, train_metrics, learner_state, timing_dict)) + eval_queue.put((ep_metrics, train_metrics, learner_state, timing_dict)) def learner_setup( key: chex.PRNGKey, config: DictConfig, learner_devices: List ) -> Tuple[ - SebulbaLearnerFn[LearnerState, PPOTransition], Tuple[ActorApply, CriticApply], LearnerState + SebulbaLearnerFn[LearnerState, PPOTransition], + Tuple[ActorApply, CriticApply], + LearnerState, + Sharding, ]: - """Initialise learner_fn, network, optimiser, environment and states.""" + """Initialise learner_fn, network and learner state.""" # create temporory envoirnments. env = environments.make_gym_env(config, config.arch.num_envs) @@ -462,9 +469,8 @@ def learner_setup( devices = mesh_utils.create_device_mesh((len(learner_devices),), devices=learner_devices) mesh = Mesh(devices, axis_names=("learner_devices",)) model_spec = P() - data_spec = P("learner_devices",) - model_sharding = NamedSharding(mesh, model_spec) # todo: return these - data_sharding = NamedSharding(mesh, data_spec) + data_spec = P("learner_devices") + learner_sharding = NamedSharding(mesh, model_spec) # PRNG keys. key, actor_key, critic_key = jax.random.split(key, 3) @@ -511,6 +517,7 @@ def learner_setup( apply_fns = (actor_network.apply, critic_network.apply) update_fns = (actor_optim.update, critic_optim.update) + # defines how the learner state is sharded: params, opt and key = replicated, timestep = sharded learn_state_spec = LearnerState(model_spec, model_spec, model_spec, None, data_spec) learn = get_learner_step_fn(apply_fns, update_fns, config) learn = jax.jit( @@ -521,7 +528,6 @@ def learner_setup( out_specs=ExperimentOutput(learn_state_spec, data_spec, data_spec), ) ) - # learn = jax.pmap(learn, axis_name="learner_devices", devices=learner_devices) # Load model from checkpoint if specified. if config.logger.checkpointing.load_model: @@ -539,13 +545,15 @@ def learner_setup( opt_states = OptStates(actor_opt_state, critic_opt_state) # Duplicate learner across Learner devices. - params, opt_states, step_keys = jax.device_put((params, opt_states, step_keys), model_sharding) + params, opt_states, step_keys = jax.device_put( + (params, opt_states, step_keys), learner_sharding + ) # Initialise learner state. 
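The learner state constructed below starts with replicated params, optimiser states and keys (P()); the timestep it later receives from the pipeline is sharded across learner devices (P("learner_devices")), matching the learn_state_spec passed to shard_map above. A toy shard_map under the same replicated-params / sharded-data split; the fake gradient and learning rate are made up, only the spec structure is the point:

import jax
import jax.numpy as jnp
from jax.experimental import mesh_utils
from jax.experimental.shard_map import shard_map
from jax.sharding import Mesh, PartitionSpec as P

n = jax.device_count()
mesh = Mesh(mesh_utils.create_device_mesh((n,)), axis_names=("learner_devices",))

def sgd_step(params, batch):
    # Each device sees only its shard of `batch`, so the toy "gradients" differ per device...
    grads = jax.tree.map(lambda p: jnp.mean(batch) * jnp.ones_like(p), params)
    # ...and a collective over the mesh axis keeps the replicated params in sync.
    grads = jax.tree.map(lambda g: jax.lax.psum(g, "learner_devices") / n, grads)
    return jax.tree.map(lambda p, g: p - 0.1 * g, params, grads)

step = jax.jit(
    shard_map(sgd_step, mesh=mesh, in_specs=(P(), P("learner_devices")), out_specs=P())
)

params = {"w": jnp.ones((3,))}
batch = jnp.arange(4.0 * n)  # leading axis divisible by the number of devices
params = step(params, batch)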
- init_learner_state = LearnerState(params, opt_states, step_keys, None, None) + init_learner_state = LearnerState(params, opt_states, step_keys, None, None) # type: ignore env.close() - return learn, apply_fns, init_learner_state + return learn, apply_fns, init_learner_state, learner_sharding # type: ignore def run_experiment(_config: DictConfig) -> float: @@ -564,7 +572,7 @@ def run_experiment(_config: DictConfig) -> float: np_rng = np.random.default_rng(config.system.seed) # Setup learner. - learn, apply_fns, learner_state = learner_setup(key, config, learner_devices) + learn, apply_fns, learner_state, learner_sharding = learner_setup(key, config, learner_devices) # Setup evaluator. # One key per device for evaluation. @@ -578,7 +586,10 @@ def run_experiment(_config: DictConfig) -> float: check_sebulba_config(config) steps_per_rollout = ( - config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval * config.arch.n_learner_accumulate + config.system.rollout_length + * config.arch.num_envs + * config.system.num_updates_per_eval + * config.arch.n_learner_accumulate ) # Logger setup @@ -600,12 +611,8 @@ def run_experiment(_config: DictConfig) -> float: inital_params = jax.device_put(learner_state.params, actor_devices[0]) # unreplicate # the rollout queue/ the pipe between actor and learner - # todo: return this from/pass into: learner setup - devices = mesh_utils.create_device_mesh((len(learner_devices),), devices=learner_devices) - mesh = Mesh(devices, axis_names=("learner_devices",)) - sharding = NamedSharding(mesh, P("learner_devices")) pipe_lifetime = ThreadLifetime() - pipe = Pipeline(config.arch.rollout_queue_size, sharding, pipe_lifetime) + pipe = Pipeline(config.arch.rollout_queue_size, learner_sharding, pipe_lifetime) pipe.start() params_sources: List[ParamsSource] = [] @@ -613,20 +620,9 @@ def run_experiment(_config: DictConfig) -> float: actor_lifetime = ThreadLifetime() params_sources_lifetime = ThreadLifetime() - # Unfortunately we have to do this here, because creating envs inside the actor threads causes deadlocks - # todo: see what happens if we do this in the thread creating loop - envs = [[] for i in range(len(actor_devices))] - print( - f"{Fore.BLUE}{Style.BRIGHT}Starting up environments, this may take a while...{Style.RESET_ALL}" - ) - for i in range(len(actor_devices)): - for _ in range(config.arch.n_threads_per_executor): - env = environments.make_gym_env(config, config.arch.num_envs) - envs[i].append(env) - print(f"{Fore.BLUE}{Style.BRIGHT}All environments created{Style.RESET_ALL}") - # Create the actor threads - for dev_idx, actor_device in enumerate(actor_devices): + print(f"{Fore.BLUE}{Style.BRIGHT}Starting up actor threads...{Style.RESET_ALL}") + for actor_device in actor_devices: # Create 1 params source per device params_source = ParamsSource(inital_params, actor_device, params_sources_lifetime) params_source.start() @@ -641,7 +637,8 @@ def run_experiment(_config: DictConfig) -> float: target=rollout, args=( act_key, - envs[dev_idx][thread_id], + # We have to do this here, creating envs inside actor threads causes deadlocks + environments.make_gym_env(config, config.arch.num_envs), config, pipe, params_source, diff --git a/mava/utils/jax_utils.py b/mava/utils/jax_utils.py index 3c03455f2..c89c6a4a4 100644 --- a/mava/utils/jax_utils.py +++ b/mava/utils/jax_utils.py @@ -71,5 +71,4 @@ def unreplicate_batch_dim(x: Any) -> Any: def switch_leading_axes(arr: chex.Array) -> chex.Array: """Switches the first two axes, generally used for BT 
-> TB.""" - arr = tree.map(lambda x: jax.numpy.swapaxes(x, 0, 1), arr) - return arr + return tree.map(lambda x: x.swapaxes(0, 1), arr) diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index 4083155d5..8fffe4d48 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -25,7 +25,7 @@ from jax.sharding import Sharding from jumanji.types import TimeStep -# todo: remove the ppo dependencies +# todo: remove the ppo dependencies when we make sebulba for other systems from mava.systems.ppo.types import Params, PPOTransition QUEUE_PUT_TIMEOUT = 100 @@ -99,22 +99,22 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict # [Transition(num_envs)] * rollout_len -> Transition[done=(num_envs, rollout_len, ...)] traj = _stack_trajectory(traj) - sharded_traj, sharded_timestep = jax.device_put((traj, timestep), device=self.sharding, donate=True) + traj, timestep = jax.device_put((traj, timestep), device=self.sharding, donate=True) # We block on the `put` to ensure that actors wait for the learners to catch up. # This ensures two things: # The actors don't get too far ahead of the learners, which could lead to off-policy data. # The actors don't "waste" samples by generating samples that the learners can't consume. # However, we put a timeout of 100 seconds to avoid deadlocks in case the learner - # is not consuming the data. This is a safety measure and should not occur in normal - # operation. We use a try-finally so the lock is released even if an exception is raised. + # is not consuming the data. This is a safety measure and should not normally occur. + # We use a try-finally so the lock is released even if an exception is raised. try: self._queue.put( - (sharded_traj, sharded_timestep, time_dict), + (traj, timestep, time_dict), block=True, timeout=QUEUE_PUT_TIMEOUT, ) - except queue.Full: # todo: check if this is needed because we catch this exception outside + except queue.Full: print( f"{Fore.RED}{Style.BRIGHT}Pipeline is full and actor has timed out, " f"this should not happen. A deadlock might be occurring{Style.RESET_ALL}" diff --git a/mava/wrappers/jaxmarl.py b/mava/wrappers/jaxmarl.py index 72608f85f..f6ad51558 100644 --- a/mava/wrappers/jaxmarl.py +++ b/mava/wrappers/jaxmarl.py @@ -214,7 +214,6 @@ def reset( def step( self, state: JaxMarlState, action: Array ) -> Tuple[JaxMarlState, TimeStep[Union[Observation, ObservationGlobalState]]]: - # todo: how do you know if it's a truncation with only dones? 
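Stepping back to the pipeline's bounded `put` above: the idea is a blocking queue insert with a timeout, so a stalled learner surfaces as a loud warning rather than a silent deadlock. A tiny generic sketch (values illustrative, not the Pipeline class itself):

import queue

rollout_queue: queue.Queue = queue.Queue(maxsize=5)

def put_with_timeout(item, timeout_s: float = 100.0) -> bool:
    try:
        rollout_queue.put(item, block=True, timeout=timeout_s)
        return True
    except queue.Full:
        print("Pipeline is full and the producer timed out; the consumer may be stuck.")
        return False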
key, step_key = jax.random.split(state.key) obs, env_state, reward, done, _ = self._env.step( step_key, state.state, unbatchify(action, self.agents) From c0c88bc2b782d05a7b1b2d2fbdfe552fec9d14f9 Mon Sep 17 00:00:00 2001 From: Sasha Abramowitz Date: Sat, 19 Oct 2024 15:09:28 +0200 Subject: [PATCH 121/139] chore: minor env typing fixes --- mava/systems/ppo/sebulba/ff_ippo.py | 7 ++++--- mava/wrappers/gym.py | 20 +++++++++----------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 2312fb023..35a5d86ec 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -47,7 +47,6 @@ ActorApply, CriticApply, ExperimentOutput, - MarlEnv, Observation, SebulbaLearnerFn, ) @@ -59,11 +58,12 @@ from mava.utils.sebulba import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics +from mava.wrappers.gym import GymToJumanji def rollout( key: chex.PRNGKey, - env: MarlEnv, + env: GymToJumanji, config: DictConfig, rollout_queue: Pipeline, params_source: ParamsSource, @@ -101,7 +101,8 @@ def act_fn( actor_policy = actor_apply_fn(params.actor_params, observation) action = actor_policy.sample(seed=key) log_prob = actor_policy.log_prob(action) - + # It may be faster to calculate the values in the learner as + # then we won't need to pass critic params to actors. value = critic_apply_fn(params.critic_params, observation).squeeze() return action, log_prob, value diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 048294893..fa42e5e82 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -29,7 +29,7 @@ from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray -from mava.types import Observation, ObservationGlobalState +from mava.types import MarlEnv, Observation, ObservationGlobalState if TYPE_CHECKING: # https://github.com/python/mypy/issues/6239 from dataclasses import dataclass @@ -217,19 +217,17 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: class GymToJumanji: - """Converts from the Gym API to the dm_env API, using Jumanji's Timestep type.""" + """Converts from the Gym API to the dm_env API.""" - def __init__(self, env: gymnasium.vector.async_vector_env): + def __init__(self, env: gymnasium.vector.VectorEnv): self.env = env self.single_action_space = env.unwrapped.single_action_space self.single_observation_space = env.unwrapped.single_observation_space - def reset( - self, seed: Optional[list[int]] = None, options: Optional[list[dict]] = None - ) -> TimeStep: - obs, info = self.env.reset(seed=seed, options=options) + def reset(self, seed: Optional[list[int]] = None, options: Optional[dict] = None) -> TimeStep: + obs, info = self.env.reset(seed=seed, options=options) # type: ignore - num_agents = len(self.env.single_action_space) + num_agents = len(self.env.single_action_space) # type: ignore num_envs = self.env.num_envs ep_done = np.zeros(num_envs, dtype=float) @@ -269,16 +267,16 @@ def _format_observation( def _create_timestep( self, obs: NDArray, ep_done: NDArray, terminated: NDArray, rewards: NDArray, info: Dict ) -> TimeStep: - obs = self._format_observation(obs, info) + observation = self._format_observation(obs, info) # Filter out the masks and auxiliary data extras = {key: value for key, value in info["metrics"].items() if key[0] != "_"} step_type = np.where(ep_done, StepType.LAST, StepType.MID) 
return TimeStep( - step_type=step_type, + step_type=step_type, # type: ignore reward=rewards, discount=1.0 - terminated, - observation=obs, + observation=observation, extras=extras, ) From 6b2d01c2fc854be4342c4049d88e7b79397894cd Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Mon, 21 Oct 2024 11:11:09 +0100 Subject: [PATCH 122/139] fix: start actors simultaneously to avoid deadlocks --- mava/systems/ppo/sebulba/ff_ippo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 35a5d86ec..971088a97 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -650,8 +650,11 @@ def run_experiment(_config: DictConfig) -> float: ), name=f"Actor-{actor_device}-{thread_id}", ) - actor.start() actor_threads.append(actor) + + # Start the actors simultaneously + for actor in actor_threads: + actor.start() eval_queue: Queue = Queue() threading.Thread( From a13ab65cd4cb4aaa5d54643c1b01c989800023b9 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 23 Oct 2024 14:05:53 +0100 Subject: [PATCH 123/139] feat: support for smac --- mava/configs/default/ff_ippo_sebulba.yaml | 2 +- mava/configs/env/lbf_gym.yaml | 3 +++ mava/configs/env/rware_gym.yaml | 3 +++ mava/configs/env/smac_gym.yaml | 25 +++++++++++++++++++++++ mava/utils/make_env.py | 4 +++- mava/utils/sebulba.py | 2 +- mava/wrappers/__init__.py | 1 + mava/wrappers/gym.py | 15 +++++++++++++- requirements/requirements.txt | 1 + 9 files changed, 52 insertions(+), 4 deletions(-) create mode 100644 mava/configs/env/smac_gym.yaml diff --git a/mava/configs/default/ff_ippo_sebulba.yaml b/mava/configs/default/ff_ippo_sebulba.yaml index 7669049b1..cc2b4acae 100644 --- a/mava/configs/default/ff_ippo_sebulba.yaml +++ b/mava/configs/default/ff_ippo_sebulba.yaml @@ -3,7 +3,7 @@ defaults: - arch: sebulba - system: ppo/ff_ippo - network: mlp # [mlp, continuous_mlp, cnn] - - env: rware_gym # [rware_gym, lbf_gym] + - env: smac_gym # [rware_gym, lbf_gym, smac_gym] - _self_ hydra: diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index a7fa1be89..7ae03d010 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -20,3 +20,6 @@ log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. use_shared_rewards: True + +kwargs: + max_episode_steps: 100 \ No newline at end of file diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index d3d6a49b2..0fcd41a2b 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -20,3 +20,6 @@ log_win_rate: False # Weather or not to sum the returned rewards over all of the agents. use_shared_rewards: True + +kwargs: + max_episode_steps: 500 \ No newline at end of file diff --git a/mava/configs/env/smac_gym.yaml b/mava/configs/env/smac_gym.yaml new file mode 100644 index 000000000..a4d8b7031 --- /dev/null +++ b/mava/configs/env/smac_gym.yaml @@ -0,0 +1,25 @@ +# ---Environment Configs--- +defaults: + - _self_ + +env_name: Starcraft # Used for logging purposes. +scenario: + name: smaclite + task_name: smaclite/2s3z-v0 # smaclite/ + ['10m_vs_11m-v0', '27m_vs_30m-v0', '3s5z_vs_3s6z-v0', '2s3z-v0', '3s5z-v0', 'MMM-v0', 'MMM2-v0', '2c_vs_64zg-v0', 'bane_vs_bane-v0', 'corridor-v0', '2s_vs_1sc-v0', '3s_vs_5z-v0'] + +# Defines the metric that will be used to evaluate the performance of the agent. 
+# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. +eval_metric: episode_return + +# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. +# This should not be changed. +implicit_agent_id: False +# Whether or not to log the winrate of this environment. This should not be changed as not all +# environments have a winrate metric. +log_win_rate: False + +# Weather or not to sum the returned rewards over all of the agents. +use_shared_rewards: True + +kwargs: + max_episode_steps: 500 \ No newline at end of file diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 8b9c85afd..32a85155c 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -54,6 +54,7 @@ RecordEpisodeMetrics, RwareWrapper, SmaxWrapper, + SmacWrapper, async_multiagent_worker, ) from mava.wrappers.jaxmarl import JaxMarlWrapper @@ -77,6 +78,7 @@ _gym_registry = { "RobotWarehouse": GymWrapper, "LevelBasedForaging": GymWrapper, + "Starcraft": SmacWrapper, } @@ -247,7 +249,7 @@ def make_gym_env( def create_gym_env(config: DictConfig, add_global_state: bool = False) -> gymnasium.Env: registered_name = f"{config.env.scenario.name}:{config.env.scenario.task_name}" - env = gym.make(registered_name, disable_env_checker=False) + env = gym.make(registered_name, disable_env_checker=False, **config.env.kwargs) wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state) if config.system.add_agent_id: wrapped_env = GymAgentIDWrapper(wrapped_env) diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index 8fffe4d48..cab1ddd0e 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -99,7 +99,7 @@ def put(self, traj: Sequence[PPOTransition], timestep: TimeStep, time_dict: Dict # [Transition(num_envs)] * rollout_len -> Transition[done=(num_envs, rollout_len, ...)] traj = _stack_trajectory(traj) - traj, timestep = jax.device_put((traj, timestep), device=self.sharding, donate=True) + traj, timestep = jax.device_put((traj, timestep), device=self.sharding) # We block on the `put` to ensure that actors wait for the learners to catch up. 
# This ensures two things: diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index f8cf8a64c..f7e89d756 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -21,6 +21,7 @@ GymRecordEpisodeMetrics, GymToJumanji, GymWrapper, + SmacWrapper, async_multiagent_worker, ) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index fa42e5e82..aa64e2755 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -106,7 +106,7 @@ def reset( return np.array(agents_view), info - def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: + def step(self, actions: Tuple) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) info = {"actions_mask": self.get_actions_mask(info)} @@ -128,7 +128,20 @@ def get_actions_mask(self, info: Dict) -> NDArray: def get_global_obs(self, obs: NDArray) -> NDArray: global_obs = np.concatenate(obs, axis=0) return np.tile(global_obs, (self.num_agents, 1)) + +class SmacWrapper(GymWrapper): + """A wrapper that converts actions step to integers.""" + + def step(self, actions: Tuple) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: + # Convert actions to integers before passing them to the environment + actions = [int(action) for action in actions] + + agents_view, reward, terminated, truncated, info = super().step(actions) + return agents_view, reward, terminated, truncated, info + + def get_actions_mask(self, info: Dict) -> NDArray: + return np.array(self._env.unwrapped.get_avail_actions()) class GymRecordEpisodeMetrics(gymnasium.Wrapper): """Record the episode returns and lengths.""" diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 61f7fe68a..5522b2e82 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -25,3 +25,4 @@ scipy==1.12.0 tensorboard_logger tensorflow_probability type_enforced # needed because gigastep is missing this dependency +smaclite @ git+https://github.com/uoe-agents/smaclite.git \ No newline at end of file From bc55375a399c6a8ba2ac702a31791214e4026cd6 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 23 Oct 2024 14:43:55 +0100 Subject: [PATCH 124/139] chore: pre-commits --- mava/configs/env/lbf_gym.yaml | 2 +- mava/configs/env/rware_gym.yaml | 2 +- mava/configs/env/smac_gym.yaml | 2 +- mava/evaluator.py | 4 +++- mava/systems/ppo/sebulba/ff_ippo.py | 17 ++++++++--------- mava/utils/config.py | 2 +- mava/utils/make_env.py | 2 +- mava/wrappers/gym.py | 14 ++++++++------ requirements/requirements.txt | 2 +- 9 files changed, 25 insertions(+), 22 deletions(-) diff --git a/mava/configs/env/lbf_gym.yaml b/mava/configs/env/lbf_gym.yaml index 7ae03d010..f001e0913 100644 --- a/mava/configs/env/lbf_gym.yaml +++ b/mava/configs/env/lbf_gym.yaml @@ -22,4 +22,4 @@ log_win_rate: False use_shared_rewards: True kwargs: - max_episode_steps: 100 \ No newline at end of file + max_episode_steps: 100 diff --git a/mava/configs/env/rware_gym.yaml b/mava/configs/env/rware_gym.yaml index 0fcd41a2b..facf7f8d7 100644 --- a/mava/configs/env/rware_gym.yaml +++ b/mava/configs/env/rware_gym.yaml @@ -22,4 +22,4 @@ log_win_rate: False use_shared_rewards: True kwargs: - max_episode_steps: 500 \ No newline at end of file + max_episode_steps: 500 diff --git a/mava/configs/env/smac_gym.yaml b/mava/configs/env/smac_gym.yaml index a4d8b7031..1f2f48c89 100644 --- a/mava/configs/env/smac_gym.yaml +++ 
b/mava/configs/env/smac_gym.yaml @@ -22,4 +22,4 @@ log_win_rate: False use_shared_rewards: True kwargs: - max_episode_steps: 500 \ No newline at end of file + max_episode_steps: 500 diff --git a/mava/evaluator.py b/mava/evaluator.py index 99d4eb8d4..8e4dd5dee 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -239,7 +239,9 @@ def get_sebulba_eval_fn( episode_loops = math.ceil(eval_episodes / n_parallel_envs) env = env_maker(config, n_parallel_envs) - act_fn = jax.jit(act_fn, device=jax.devices('cpu')[0]) # cpu so that we don't block actors/learners + act_fn = jax.jit( + act_fn, device=jax.devices("cpu")[0] + ) # cpu so that we don't block actors/learners # Warnings if num eval episodes is not divisible by num parallel envs. if eval_episodes % n_parallel_envs != 0: diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 971088a97..2ab554f69 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -26,15 +26,14 @@ import jax.debug import jax.numpy as jnp import numpy as np -from numpy.typing import NDArray import optax from colorama import Fore, Style from flax.core.frozen_dict import FrozenDict from jax import tree from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map -from jax.sharding import Mesh, NamedSharding, Sharding -from jax.sharding import PartitionSpec as P +from jax.sharding import Mesh, NamedSharding, PartitionSpec, Sharding +from numpy.typing import NDArray from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint @@ -165,7 +164,7 @@ def get_learner_step_fn( ) -> SebulbaLearnerFn[LearnerState, PPOTransition]: """Get the learner function.""" - num_agents, num_envs = config.system.num_agents, config.arch.num_envs + num_envs = config.arch.num_envs num_learner_envs = int(num_envs // len(config.arch.learner_device_ids)) # Get apply and update functions for actor and critic networks. @@ -469,8 +468,8 @@ def learner_setup( devices = mesh_utils.create_device_mesh((len(learner_devices),), devices=learner_devices) mesh = Mesh(devices, axis_names=("learner_devices",)) - model_spec = P() - data_spec = P("learner_devices") + model_spec = PartitionSpec() + data_spec = PartitionSpec("learner_devices") learner_sharding = NamedSharding(mesh, model_spec) # PRNG keys. @@ -651,8 +650,8 @@ def run_experiment(_config: DictConfig) -> float: name=f"Actor-{actor_device}-{thread_id}", ) actor_threads.append(actor) - - # Start the actors simultaneously + + # Start the actors simultaneously for actor in actor_threads: actor.start() @@ -704,7 +703,7 @@ def run_experiment(_config: DictConfig) -> float: if config.arch.absolute_metric and max_episode_return <= episode_return: best_params_cpu = copy.deepcopy(learner_state_cpu.params.actor_params) - max_episode_return = episode_return + max_episode_return = float(episode_return) evaluator_envs.close() eval_performance = float(np.mean(eval_metrics[config.env.eval_metric])) diff --git a/mava/utils/config.py b/mava/utils/config.py index 34a35f091..c82e3a315 100644 --- a/mava/utils/config.py +++ b/mava/utils/config.py @@ -46,7 +46,7 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: if config.arch.architecture_name == "anakin": n_devices = len(jax.devices()) update_batch_size = config.system.update_batch_size - n_accumulate = 1 # We dont accumulate envs in anakin + n_accumulate = 1 # We dont accumulate envs in anakin else: n_devices = 1 # We only use a single device's output when updating. 
update_batch_size = 1 diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 32a85155c..1206d3886 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -53,8 +53,8 @@ MatraxWrapper, RecordEpisodeMetrics, RwareWrapper, - SmaxWrapper, SmacWrapper, + SmaxWrapper, async_multiagent_worker, ) from mava.wrappers.jaxmarl import JaxMarlWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index aa64e2755..020abf158 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -19,7 +19,7 @@ from enum import IntEnum from multiprocessing import Queue from multiprocessing.connection import Connection -from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import gymnasium import gymnasium.vector.async_vector_env @@ -29,7 +29,7 @@ from gymnasium.vector.utils import write_to_shared_memory from numpy.typing import NDArray -from mava.types import MarlEnv, Observation, ObservationGlobalState +from mava.types import Observation, ObservationGlobalState if TYPE_CHECKING: # https://github.com/python/mypy/issues/6239 from dataclasses import dataclass @@ -106,7 +106,7 @@ def reset( return np.array(agents_view), info - def step(self, actions: Tuple) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: + def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) info = {"actions_mask": self.get_actions_mask(info)} @@ -128,21 +128,23 @@ def get_actions_mask(self, info: Dict) -> NDArray: def get_global_obs(self, obs: NDArray) -> NDArray: global_obs = np.concatenate(obs, axis=0) return np.tile(global_obs, (self.num_agents, 1)) - + + class SmacWrapper(GymWrapper): """A wrapper that converts actions step to integers.""" - def step(self, actions: Tuple) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: + def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: # Convert actions to integers before passing them to the environment actions = [int(action) for action in actions] agents_view, reward, terminated, truncated, info = super().step(actions) return agents_view, reward, terminated, truncated, info - + def get_actions_mask(self, info: Dict) -> NDArray: return np.array(self._env.unwrapped.get_avail_actions()) + class GymRecordEpisodeMetrics(gymnasium.Wrapper): """Record the episode returns and lengths.""" diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 5522b2e82..13ff3a050 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -22,7 +22,7 @@ optax protobuf~=3.20 rware scipy==1.12.0 +smaclite @ git+https://github.com/uoe-agents/smaclite.git tensorboard_logger tensorflow_probability type_enforced # needed because gigastep is missing this dependency -smaclite @ git+https://github.com/uoe-agents/smaclite.git \ No newline at end of file From c6d460f73d9ed00cd635f2a45f99b9f946825249 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Sun, 27 Oct 2024 16:09:04 +0100 Subject: [PATCH 125/139] fix: random segfault --- mava/systems/ppo/sebulba/ff_ippo.py | 3 ++- mava/utils/sebulba.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 2ab554f69..1869ba092 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -437,8 +437,9 @@ def learner_thread( 
metrics.append((ep_metrics, train_metrics)) # Update all the params sources so all actors can get the latest params + params = jax.block_until_ready(learner_state.params) for source in params_sources: - source.update(learner_state.params) + source.update(params) # Pass all the metrics and params to the main thread (evaluator) for logging and evaluation ep_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py index cab1ddd0e..0e2e6261d 100644 --- a/mava/utils/sebulba.py +++ b/mava/utils/sebulba.py @@ -161,7 +161,7 @@ def run(self) -> None: while not self.lifetime.should_stop(): try: waiting = self.new_value.get(block=True, timeout=1) - self.value = jax.device_put(jax.block_until_ready(waiting), self.device) + self.value = jax.device_put(waiting, self.device) except queue.Empty: continue From 659a83776030e2ec8601c4490136d486019f2777 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Mon, 4 Nov 2024 16:31:10 +0100 Subject: [PATCH 126/139] fix: give each learner a unique random key --- mava/configs/default/ff_ippo_sebulba.yaml | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 9 ++++++--- mava/wrappers/gym.py | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mava/configs/default/ff_ippo_sebulba.yaml b/mava/configs/default/ff_ippo_sebulba.yaml index cc2b4acae..ee5d1887d 100644 --- a/mava/configs/default/ff_ippo_sebulba.yaml +++ b/mava/configs/default/ff_ippo_sebulba.yaml @@ -3,7 +3,7 @@ defaults: - arch: sebulba - system: ppo/ff_ippo - network: mlp # [mlp, continuous_mlp, cnn] - - env: smac_gym # [rware_gym, lbf_gym, smac_gym] + - env: lbf_gym # [rware_gym, lbf_gym, smac_gym] - _self_ hydra: diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 1869ba092..18207deef 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -329,7 +329,9 @@ def _critic_loss_fn( return (new_params, new_opt_state, key), loss_info params, opt_states, traj_batch, advantages, targets, key = update_state + key = jnp.squeeze(key, axis=0) # Remove the learner_devices axis key, shuffle_key, entropy_key = jax.random.split(key, 3) + key = jnp.expand_dims(key, axis=0) # add the learner_devices axis for shape consitency # Shuffle minibatches batch_size = config.system.rollout_length * num_learner_envs permutation = jax.random.permutation(shuffle_key, batch_size) @@ -518,8 +520,8 @@ def learner_setup( apply_fns = (actor_network.apply, critic_network.apply) update_fns = (actor_optim.update, critic_optim.update) - # defines how the learner state is sharded: params, opt and key = replicated, timestep = sharded - learn_state_spec = LearnerState(model_spec, model_spec, model_spec, None, data_spec) + # defines how the learner state is sharded: params, opt and key = sharded, timestep = sharded + learn_state_spec = LearnerState(model_spec, model_spec, data_spec, None, data_spec) learn = get_learner_step_fn(apply_fns, update_fns, config) learn = jax.jit( shard_map( @@ -542,7 +544,8 @@ def learner_setup( params = restored_params # Define params to be replicated across devices and batches. - key, step_keys = jax.random.split(key) + key, *step_keys = jax.random.split(key, len(learner_devices) + 1) + step_keys = jnp.stack(step_keys, 0) opt_states = OptStates(actor_opt_state, critic_opt_state) # Duplicate learner across Learner devices. 
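The unique-key-per-learner fix above amounts to splitting one PRNG key into a stacked batch of keys, one per learner device, so each shard draws different minibatch permutations. A minimal sketch of that split (device count illustrative):

import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
n_learner_devices = len(jax.devices())
key, *step_keys = jax.random.split(key, n_learner_devices + 1)
step_keys = jnp.stack(step_keys, axis=0)  # one key per learner-device shard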
diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 020abf158..6ac23b38c 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -267,7 +267,7 @@ def _format_observation( ) -> Union[Observation, ObservationGlobalState]: """Create an observation from the raw observation and environment state.""" - # (num_agents, num_envs, ...) -> (num_envs, num_agents, ...) + # (N, B, O) -> (B, N, O) obs = np.array(obs).swapaxes(0, 1) action_mask = np.stack(info["actions_mask"]) obs_data = {"agents_view": obs, "action_mask": action_mask} From 7deb75baaaed32d428b708e9baf11b749b696582 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Tue, 5 Nov 2024 14:53:51 +0100 Subject: [PATCH 127/139] chore: bunch of minor changes and fixes --- mava/configs/env/smac_gym.yaml | 4 ++-- mava/evaluator.py | 14 ++++++++------ mava/utils/make_env.py | 2 +- mava/wrappers/gym.py | 19 +++++++++++-------- 4 files changed, 22 insertions(+), 17 deletions(-) diff --git a/mava/configs/env/smac_gym.yaml b/mava/configs/env/smac_gym.yaml index 1f2f48c89..9fbbea022 100644 --- a/mava/configs/env/smac_gym.yaml +++ b/mava/configs/env/smac_gym.yaml @@ -2,7 +2,7 @@ defaults: - _self_ -env_name: Starcraft # Used for logging purposes. +env_name: SMAC # Used for logging purposes. scenario: name: smaclite task_name: smaclite/2s3z-v0 # smaclite/ + ['10m_vs_11m-v0', '27m_vs_30m-v0', '3s5z_vs_3s6z-v0', '2s3z-v0', '3s5z-v0', 'MMM-v0', 'MMM2-v0', '2c_vs_64zg-v0', 'bane_vs_bane-v0', 'corridor-v0', '2s_vs_1sc-v0', '3s_vs_5z-v0'] @@ -16,7 +16,7 @@ eval_metric: episode_return implicit_agent_id: False # Whether or not to log the winrate of this environment. This should not be changed as not all # environments have a winrate metric. -log_win_rate: False +log_win_rate: True # Weather or not to sum the returned rewards over all of the agents. use_shared_rewards: True diff --git a/mava/evaluator.py b/mava/evaluator.py index 8e4dd5dee..c43983c45 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -221,11 +221,12 @@ def get_sebulba_eval_fn( Args: ---- - env: an environment that conforms to the mava environment spec. - act_fn: a function that takes in params, timestep, key and optionally a state + env_maker: A function to create the environment instances. + act_fn: A function that takes in params, timestep, key and optionally a state and returns actions and optionally a state (see `EvalActFn`). - config: the system config. - absolute_metric: whether or not this evaluator calculates the absolute_metric. + config: The system config. + np_rng: Random number generator for seeding environment. + absolute_metric: Whether or not this evaluator calculates the absolute_metric. This determines how many evaluation episodes it does. """ n_devices = jax.device_count() @@ -240,8 +241,8 @@ def get_sebulba_eval_fn( env = env_maker(config, n_parallel_envs) act_fn = jax.jit( - act_fn, device=jax.devices("cpu")[0] - ) # cpu so that we don't block actors/learners + act_fn, device=jax.local_devices()[config.arch.actor_device_ids[0]] + ) # Evaluate using the first actor device # Warnings if num eval episodes is not divisible by num parallel envs. if eval_episodes % n_parallel_envs != 0: @@ -264,6 +265,7 @@ def eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) -> Met def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: """Simulates `num_envs` episodes.""" + # Generate a list of random seeds within the 32-bit integer range, using a seeded RNG. 
seeds = np_rng.integers(np.iinfo(np.int32).max, size=n_parallel_envs).tolist() ts = env.reset(seed=seeds) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 1206d3886..aaceabd73 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -78,7 +78,7 @@ _gym_registry = { "RobotWarehouse": GymWrapper, "LevelBasedForaging": GymWrapper, - "Starcraft": SmacWrapper, + "SMAC": SmacWrapper, } diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 6ac23b38c..9f53e52bb 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -42,7 +42,7 @@ # needed to avoid host -> device transfers when calling TimeStep.last() class StepType(IntEnum): - """Coppy of Jumanji's step type but with numpy arrays""" + """Copy of Jumanji's step type but with numpy arrays""" FIRST = 0 MID = 1 @@ -69,7 +69,7 @@ def last(self) -> bool: class GymWrapper(gymnasium.Wrapper): """Base wrapper for multi-agent gym environments. - This wrapper works out of the box for RobotWarehouse and level based foraging. + This wrapper works out of the box for RobotWarehouse and level-based foraging. """ def __init__( @@ -100,7 +100,7 @@ def reset( agents_view, info = self._env.reset() - info = {"actions_mask": self.get_actions_mask(info)} + info = {"action_mask": self.get_action_mask(info)} if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) @@ -109,7 +109,7 @@ def reset( def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: agents_view, reward, terminated, truncated, info = self._env.step(actions) - info = {"actions_mask": self.get_actions_mask(info)} + info = {"action_mask": self.get_action_mask(info)} if self.add_global_state: info["global_obs"] = self.get_global_obs(agents_view) @@ -120,7 +120,7 @@ def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict] return agents_view, reward, terminated, truncated, info - def get_actions_mask(self, info: Dict) -> NDArray: + def get_action_mask(self, info: Dict) -> NDArray: if "action_mask" in info: return np.array(info["action_mask"]) return np.ones((self.num_agents, self.num_actions), dtype=np.float32) @@ -138,10 +138,11 @@ def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict] actions = [int(action) for action in actions] agents_view, reward, terminated, truncated, info = super().step(actions) + info["won_episode"] = info["battle_won"] return agents_view, reward, terminated, truncated, info - def get_actions_mask(self, info: Dict) -> NDArray: + def get_action_mask(self, info: Dict) -> NDArray: return np.array(self._env.unwrapped.get_avail_actions()) @@ -232,7 +233,7 @@ def modify_space(self, space: spaces.Space) -> spaces.Space: class GymToJumanji: - """Converts from the Gym API to the dm_env API.""" + """Converts from the Gym API to the Jumanji API.""" def __init__(self, env: gymnasium.vector.VectorEnv): self.env = env @@ -269,7 +270,7 @@ def _format_observation( # (N, B, O) -> (B, N, O) obs = np.array(obs).swapaxes(0, 1) - action_mask = np.stack(info["actions_mask"]) + action_mask = np.stack(info["action_mask"]) obs_data = {"agents_view": obs, "action_mask": action_mask} if "global_obs" in info: @@ -301,6 +302,8 @@ def close(self) -> None: # Copied form Gymnasium/blob/main/gymnasium/vector/async_vector_env.py # Modified to work with multiple agents +# Note: The worker handles auto-resetting the environments. +# Each environment resets when all of its agents have either terminated or been truncated. 
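That auto-reset rule can be stated compactly: a sub-environment is only reset once every one of its agents has terminated or been truncated. A standalone check (not the worker itself):

import numpy as np

def episode_done(terminated: np.ndarray, truncated: np.ndarray) -> bool:
    # One entry per agent; the episode ends only when all agents are done.
    return bool(np.logical_or(terminated, truncated).all())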
def async_multiagent_worker( # CCR001 index: int, env_fn: Callable, From c024b71e28ae4519498149bdcfbbf6c392c9fa54 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 6 Nov 2024 16:07:26 +0100 Subject: [PATCH 128/139] chore: removed learner accumulation --- mava/configs/arch/sebulba.yaml | 1 - mava/systems/ppo/sebulba/ff_ippo.py | 33 +++++++---------------------- mava/utils/config.py | 4 ---- 3 files changed, 8 insertions(+), 30 deletions(-) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index 278b0592d..d8f44fd3c 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -18,7 +18,6 @@ absolute_metric: True # Whether the absolute metric should be computed. For more n_threads_per_executor: 2 # num of different threads/env batches per actor actor_device_ids: [0] # ids of actor devices learner_device_ids: [0] # ids of learner devices -n_learner_accumulate: 1 # Number of envoirnments to accumulate before updating the parameters. This determines the num_envs for learning updates which equals (num_envs * n_learner_accumulate) / len(learner_device_ids). rollout_queue_size : 5 # The size of the pipeline queue determines the extent of off-policy training allowed. A larger value permits more off-policy training. # Too large of a value with too many actors will lead to all of the updates getting wasted in old episodes diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 18207deef..789634f42 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -408,35 +408,21 @@ def learner_thread( with RecordTimeTo(learn_times["learner_time_per_eval"]): for _ in range(config.system.num_updates_per_eval): - # Accumulate the batches, timesteps, and rollout times - accumulated_traj_batches = [] - accumulated_timesteps = [] - - # Possibly get many rollouts for 1 learn step - allows learning with large batches - for _ in range(config.arch.n_learner_accumulate): - # Get the trajectory batch from the pipeline - # This is blocking so it will wait until the pipeline has data. - with RecordTimeTo(learn_times["rollout_get_time"]): - traj_batch, timestep, rollout_time = pipeline.get(block=True) - - # Store the retrieved data - accumulated_traj_batches.append(traj_batch) - accumulated_timesteps.append(timestep) - rollout_times.append(rollout_time) - - # Concatenate the accumulated timesteps and trajectory batches on the num_envs axis - traj_batches = tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_traj_batches) - timesteps = tree.map(lambda *x: jnp.concat(x, axis=0), *accumulated_timesteps) + # Get the trajectory batch from the pipeline + # This is blocking so it will wait until the pipeline has data. + with RecordTimeTo(learn_times["rollout_get_time"]): + traj_batch, timestep, rollout_time = pipeline.get(block=True) # Replace the timestep in the learner state with the latest timestep # This means the learner has access to the entire trajectory as well as # an additional timestep which it can use to bootstrap. 
- learner_state = learner_state._replace(timestep=timesteps) + learner_state = learner_state._replace(timestep=timestep) # Update the networks with RecordTimeTo(learn_times["learning_time"]): - learner_state, ep_metrics, train_metrics = learn_fn(learner_state, traj_batches) + learner_state, ep_metrics, train_metrics = learn_fn(learner_state, traj_batch) metrics.append((ep_metrics, train_metrics)) + rollout_times.append(rollout_time) # Update all the params sources so all actors can get the latest params params = jax.block_until_ready(learner_state.params) @@ -590,10 +576,7 @@ def run_experiment(_config: DictConfig) -> float: check_sebulba_config(config) steps_per_rollout = ( - config.system.rollout_length - * config.arch.num_envs - * config.system.num_updates_per_eval - * config.arch.n_learner_accumulate + config.system.rollout_length * config.arch.num_envs * config.system.num_updates_per_eval ) # Logger setup diff --git a/mava/utils/config.py b/mava/utils/config.py index c82e3a315..23484311b 100644 --- a/mava/utils/config.py +++ b/mava/utils/config.py @@ -46,11 +46,9 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: if config.arch.architecture_name == "anakin": n_devices = len(jax.devices()) update_batch_size = config.system.update_batch_size - n_accumulate = 1 # We dont accumulate envs in anakin else: n_devices = 1 # We only use a single device's output when updating. update_batch_size = 1 - n_accumulate = config.arch.n_learner_accumulate if config.system.total_timesteps is None: config.system.num_updates = int(config.system.num_updates) @@ -60,7 +58,6 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: * config.system.rollout_length * update_batch_size * config.arch.num_envs - * n_accumulate ) else: config.system.total_timesteps = int(config.system.total_timesteps) @@ -70,7 +67,6 @@ def check_total_timesteps(config: DictConfig) -> DictConfig: // update_batch_size // config.arch.num_envs // n_devices - // n_accumulate ) print( f"{Fore.RED}{Style.BRIGHT} Changing the number of updates " From db378b9a9f252aa313dc06c013841ff4a6270e57 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Thu, 7 Nov 2024 11:38:03 +0100 Subject: [PATCH 129/139] fix: Metric tracking more aligned with Jumanji --- mava/wrappers/gym.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 9f53e52bb..7c97b03cb 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -160,17 +160,17 @@ def reset( ) -> Tuple[NDArray, Dict]: agents_view, info = self._env.reset(seed, options) + # Reset the metrics + self.running_count_episode_return = 0.0 + self.running_count_episode_length = 0.0 + # Create the metrics dict metrics = { "episode_return": self.running_count_episode_return, "episode_length": self.running_count_episode_length, - "is_terminal_step": True, + "is_terminal_step": False, } - # Reset the metrics - self.running_count_episode_return = 0.0 - self.running_count_episode_length = 0 - if "won_episode" in info: metrics["won_episode"] = info["won_episode"] @@ -187,7 +187,7 @@ def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Di metrics = { "episode_return": self.running_count_episode_return, "episode_length": self.running_count_episode_length, - "is_terminal_step": False, + "is_terminal_step": np.logical_or(terminated, truncated).all().item(), } if "won_episode" in info: metrics["won_episode"] = info["won_episode"] @@ -338,7 +338,7 @@ def async_multiagent_worker( # CCR001 info, ) = 
env.step(data) if np.logical_or(terminated, truncated).all(): - observation, info = env.reset() + observation, _ = env.reset() if shared_memory: write_to_shared_memory(observation_space, index, observation, shared_memory) From 3d3cec88a28039cc43d18569f360ca70f26ac8c6 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Fri, 8 Nov 2024 11:51:29 +0100 Subject: [PATCH 130/139] fix: removed axis swaping & wrapper rename --- mava/evaluator.py | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 2 +- mava/utils/make_env.py | 6 +++--- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 29 +++++++++++++++++++---------- 5 files changed, 25 insertions(+), 16 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index c43983c45..dc6963a00 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -277,7 +277,7 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: while not finished_eps.all(): key, act_key = jax.random.split(key) action, actor_state = act_fn(params, ts, act_key, actor_state) - cpu_action = jax.device_get(action).swapaxes(0, 1) + cpu_action = jax.device_get(action) ts = env.step(cpu_action) timesteps.append(ts) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 789634f42..8470fe008 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -128,7 +128,7 @@ def act_fn( # Step environment with RecordTimeTo(actor_timings["env_step_time"]): - timestep = env.step(cpu_action.swapaxes(0, 1)) + timestep = env.step(cpu_action) dones = np.repeat(timestep.last(), num_agents).reshape(num_envs, -1) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index aaceabd73..583bd8009 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -47,7 +47,7 @@ GymAgentIDWrapper, GymRecordEpisodeMetrics, GymToJumanji, - GymWrapper, + UoeWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -76,8 +76,8 @@ _gigastep_registry = {"Gigastep": GigastepWrapper} _gym_registry = { - "RobotWarehouse": GymWrapper, - "LevelBasedForaging": GymWrapper, + "RobotWarehouse": UoeWrapper, + "LevelBasedForaging": UoeWrapper, "SMAC": SmacWrapper, } diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index f7e89d756..50b38db82 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -20,7 +20,7 @@ GymAgentIDWrapper, GymRecordEpisodeMetrics, GymToJumanji, - GymWrapper, + UoeWrapper, SmacWrapper, async_multiagent_worker, ) diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 7c97b03cb..6de018a8a 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -66,10 +66,9 @@ def mid(self) -> bool: def last(self) -> bool: return self.step_type == StepType.LAST - -class GymWrapper(gymnasium.Wrapper): - """Base wrapper for multi-agent gym environments. - This wrapper works out of the box for RobotWarehouse and level-based foraging. +class UoeWrapper(gymnasium.Wrapper): + """A base wrapper for multi-agent environments developed by the University of Edinburgh. + This wrapper is compatible with the RobotWarehouse and Level-Based Foraging environments. """ def __init__( @@ -92,6 +91,18 @@ def __init__( self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[0].n + #Tuple(Box(...) * N) --> Box(N, ...) 
+ single_obs = self.observation_space[0] + shape = (self.num_agents, *single_obs.shape) + low = np.tile(single_obs.low, (self.num_agents, 1)) + high = np.tile(single_obs.high, (self.num_agents,1) ) + self.observation_space = spaces.Box( + low=low, high=high, shape=shape, dtype=single_obs.dtype + ) + + #Tuple(Discrete(...) * N) --> Discrete(N, ...) + self.action_space = spaces.MultiDiscrete([self.num_actions] * self.num_agents) + def reset( self, seed: Optional[int] = None, options: Optional[dict] = None ) -> Tuple[NDArray, Dict]: @@ -130,7 +141,7 @@ def get_global_obs(self, obs: NDArray) -> NDArray: return np.tile(global_obs, (self.num_agents, 1)) -class SmacWrapper(GymWrapper): +class SmacWrapper(UoeWrapper): """A wrapper that converts actions step to integers.""" def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]: @@ -222,9 +233,9 @@ def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]: def modify_space(self, space: spaces.Space) -> spaces.Space: if isinstance(space, spaces.Box): - new_shape = (space.shape[0] + len(self.agent_ids),) + new_shape = (space.shape[0] , space.shape[1] + len(self.agent_ids)) return spaces.Box( - low=space.low[0], high=space.high[0], shape=new_shape, dtype=space.dtype + low=space.low[0][0], high=space.high[0][0], shape=new_shape, dtype=space.dtype ) elif isinstance(space, spaces.Tuple): return spaces.Tuple(self.modify_space(s) for s in space) @@ -268,13 +279,11 @@ def _format_observation( ) -> Union[Observation, ObservationGlobalState]: """Create an observation from the raw observation and environment state.""" - # (N, B, O) -> (B, N, O) - obs = np.array(obs).swapaxes(0, 1) action_mask = np.stack(info["action_mask"]) obs_data = {"agents_view": obs, "action_mask": action_mask} if "global_obs" in info: - global_obs = np.array(info["global_obs"]).swapaxes(0, 1) + global_obs = np.array(info["global_obs"]) obs_data["global_state"] = global_obs return ObservationGlobalState(**obs_data) else: From a7665f9fc4ccef6d9734359f5332d4401e22d4d7 Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Fri, 8 Nov 2024 11:54:38 +0100 Subject: [PATCH 131/139] chore: pre-commits --- mava/utils/make_env.py | 2 +- mava/wrappers/__init__.py | 2 +- mava/wrappers/gym.py | 15 +++++++-------- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 583bd8009..805ac1ad2 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -47,7 +47,6 @@ GymAgentIDWrapper, GymRecordEpisodeMetrics, GymToJumanji, - UoeWrapper, LbfWrapper, MabraxWrapper, MatraxWrapper, @@ -55,6 +54,7 @@ RwareWrapper, SmacWrapper, SmaxWrapper, + UoeWrapper, async_multiagent_worker, ) from mava.wrappers.jaxmarl import JaxMarlWrapper diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index 50b38db82..a241c9658 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -20,8 +20,8 @@ GymAgentIDWrapper, GymRecordEpisodeMetrics, GymToJumanji, - UoeWrapper, SmacWrapper, + UoeWrapper, async_multiagent_worker, ) from mava.wrappers.jaxmarl import MabraxWrapper, SmaxWrapper diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 6de018a8a..f01951192 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -66,6 +66,7 @@ def mid(self) -> bool: def last(self) -> bool: return self.step_type == StepType.LAST + class UoeWrapper(gymnasium.Wrapper): """A base wrapper for multi-agent environments developed by the University of Edinburgh. 
This wrapper is compatible with the RobotWarehouse and Level-Based Foraging environments. @@ -91,16 +92,14 @@ def __init__( self.num_agents = len(self._env.action_space) self.num_actions = self._env.action_space[0].n - #Tuple(Box(...) * N) --> Box(N, ...) - single_obs = self.observation_space[0] + # Tuple(Box(...) * N) --> Box(N, ...) + single_obs = self.observation_space[0] # type: ignore shape = (self.num_agents, *single_obs.shape) low = np.tile(single_obs.low, (self.num_agents, 1)) - high = np.tile(single_obs.high, (self.num_agents,1) ) - self.observation_space = spaces.Box( - low=low, high=high, shape=shape, dtype=single_obs.dtype - ) + high = np.tile(single_obs.high, (self.num_agents, 1)) + self.observation_space = spaces.Box(low=low, high=high, shape=shape, dtype=single_obs.dtype) - #Tuple(Discrete(...) * N) --> Discrete(N, ...) + # Tuple(Discrete(...) * N) --> MultiDiscrete(... * N) self.action_space = spaces.MultiDiscrete([self.num_actions] * self.num_agents) def reset( @@ -233,7 +232,7 @@ def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]: def modify_space(self, space: spaces.Space) -> spaces.Space: if isinstance(space, spaces.Box): - new_shape = (space.shape[0] , space.shape[1] + len(self.agent_ids)) + new_shape = (space.shape[0], space.shape[1] + len(self.agent_ids)) return spaces.Box( low=space.low[0][0], high=space.high[0][0], shape=new_shape, dtype=space.dtype ) From 0c4e83b2ea7ec6cbf107782a10b6320752a46f8e Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Fri, 8 Nov 2024 21:30:31 +0100 Subject: [PATCH 132/139] chore: bunch of minor changes --- mava/configs/arch/sebulba.yaml | 6 +++--- mava/configs/default/ff_ippo_sebulba.yaml | 2 +- .../env/{smac_gym.yaml => smaclite_gym.yaml} | 4 ++-- mava/evaluator.py | 5 ++--- mava/utils/make_env.py | 2 +- mava/utils/sebulba.py | 2 ++ mava/wrappers/gym.py | 20 +++++++++---------- 7 files changed, 21 insertions(+), 20 deletions(-) rename mava/configs/env/{smac_gym.yaml => smaclite_gym.yaml} (79%) diff --git a/mava/configs/arch/sebulba.yaml b/mava/configs/arch/sebulba.yaml index d8f44fd3c..52ee0ffbf 100644 --- a/mava/configs/arch/sebulba.yaml +++ b/mava/configs/arch/sebulba.yaml @@ -8,9 +8,9 @@ num_envs: 32 # number of environments per thread. evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. -num_eval_episodes: 200 # Number of episodes to evaluate per evaluation. -num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. -num_absolute_metric_eval_episodes: 32 # Number of episodes to evaluate the absolute metric (the final evaluation). +num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. +num_evaluation: 100 # Number of evenly spaced evaluations to perform during training. +num_absolute_metric_eval_episodes: 320 # Number of episodes to evaluate the absolute metric (the final evaluation). absolute_metric: True # Whether the absolute metric should be computed. 
For more details
  # on the absolute metric please see: https://arxiv.org/abs/2209.10485
diff --git a/mava/configs/default/ff_ippo_sebulba.yaml b/mava/configs/default/ff_ippo_sebulba.yaml
index ee5d1887d..d0ecfae97 100644
--- a/mava/configs/default/ff_ippo_sebulba.yaml
+++ b/mava/configs/default/ff_ippo_sebulba.yaml
@@ -3,7 +3,7 @@ defaults:
   - arch: sebulba
   - system: ppo/ff_ippo
   - network: mlp # [mlp, continuous_mlp, cnn]
-  - env: lbf_gym # [rware_gym, lbf_gym, smac_gym]
+  - env: lbf_gym # [rware_gym, lbf_gym, smaclite_gym]
   - _self_

 hydra:
diff --git a/mava/configs/env/smac_gym.yaml b/mava/configs/env/smaclite_gym.yaml
similarity index 79%
rename from mava/configs/env/smac_gym.yaml
rename to mava/configs/env/smaclite_gym.yaml
index 9fbbea022..967daec88 100644
--- a/mava/configs/env/smac_gym.yaml
+++ b/mava/configs/env/smaclite_gym.yaml
@@ -2,10 +2,10 @@ defaults:
   - _self_

-env_name: SMAC # Used for logging purposes.
+env_name: SMACLite # Used for logging purposes.

 scenario:
   name: smaclite
-  task_name: smaclite/2s3z-v0 # smaclite/ + ['10m_vs_11m-v0', '27m_vs_30m-v0', '3s5z_vs_3s6z-v0', '2s3z-v0', '3s5z-v0', 'MMM-v0', 'MMM2-v0', '2c_vs_64zg-v0', 'bane_vs_bane-v0', 'corridor-v0', '2s_vs_1sc-v0', '3s_vs_5z-v0']
+  task_name: smaclite/2s3z-v0 # smaclite/ + ['10m_vs_11m-v0', '27m_vs_30m-v0', '3s5z_vs_3s6z-v0', '2s3z-v0', '3s5z-v0', '2c_vs_64zg-v0', '2s_vs_1sc-v0', '3s_vs_5z-v0']

 # Defines the metric that will be used to evaluate the performance of the agent.
 # This metric is returned at the end of an experiment and can be used for hyperparameter tuning.
diff --git a/mava/evaluator.py b/mava/evaluator.py
index dc6963a00..a996d8d38 100644
--- a/mava/evaluator.py
+++ b/mava/evaluator.py
@@ -305,9 +305,8 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]:
             key, metric = _episode(key)
             metrics.append(metric)

-        metrics: Metrics = jax.tree_map(
-            lambda *x: np.array(x).reshape(-1), *metrics
-        )  # flatten metrics
+        # flatten metrics
+        metrics: Metrics = jax.tree_map(lambda *x: np.array(x).reshape(-1), *metrics)
         return metrics

     def timed_eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) -> Metrics:
diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py
index 805ac1ad2..3289db0d8 100644
--- a/mava/utils/make_env.py
+++ b/mava/utils/make_env.py
@@ -249,7 +249,7 @@ def make_gym_env(

     def create_gym_env(config: DictConfig, add_global_state: bool = False) -> gymnasium.Env:
         registered_name = f"{config.env.scenario.name}:{config.env.scenario.task_name}"
-        env = gym.make(registered_name, disable_env_checker=False, **config.env.kwargs)
+        env = gym.make(registered_name, disable_env_checker=True, **config.env.kwargs)
         wrapped_env = wrapper(env, config.env.use_shared_rewards, add_global_state)
         if config.system.add_agent_id:
             wrapped_env = GymAgentIDWrapper(wrapped_env)
diff --git a/mava/utils/sebulba.py b/mava/utils/sebulba.py
index 0e2e6261d..dc51140f5 100644
--- a/mava/utils/sebulba.py
+++ b/mava/utils/sebulba.py
@@ -179,6 +179,8 @@ def get(self) -> Params:


 class RecordTimeTo:
+    """Context manager to record the runtime in a `with` block"""
+
     def __init__(self, to: Any):
         self.to = to
diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py
index f01951192..feda920b7 100644
--- a/mava/wrappers/gym.py
+++ b/mava/wrappers/gym.py
@@ -42,7 +42,7 @@
 # needed to avoid host -> device transfers when calling TimeStep.last()
 class StepType(IntEnum):
-    """Copy of Jumanji's step type but with numpy arrays"""
+    """Copy of Jumanji's step type but without jax arrays"""

     FIRST = 0
     MID = 1
@@ -232,10 +232,10 @@ def step(self, action: list) -> Tuple[NDArray, float, bool, bool, Dict]:

     def modify_space(self, space: spaces.Space) -> spaces.Space:
         if isinstance(space, spaces.Box):
-            new_shape = (space.shape[0], space.shape[1] + len(self.agent_ids))
-            return spaces.Box(
-                low=space.low[0][0], high=space.high[0][0], shape=new_shape, dtype=space.dtype
-            )
+            new_shape = (space.shape[0], space.shape[1] + self.env.num_agents)
+            high = np.concatenate((space.high, np.ones_like(self.agent_ids)), axis=1)
+            low = np.concatenate((space.low, np.zeros_like(self.agent_ids)), axis=1)
+            return spaces.Box(low=low, high=high, shape=new_shape, dtype=space.dtype)
         elif isinstance(space, spaces.Tuple):
             return spaces.Tuple(self.modify_space(s) for s in space)
         else:
@@ -256,11 +256,11 @@ def reset(self, seed: Optional[list[int]] = None, options: Optional[dict] = None
         num_agents = len(self.env.single_action_space)  # type: ignore
         num_envs = self.env.num_envs

-        ep_done = np.zeros(num_envs, dtype=float)
+        step_type = np.full(num_envs, StepType.FIRST)
         rewards = np.zeros((num_envs, num_agents), dtype=float)
         teminated = np.zeros(num_envs, dtype=float)

-        timestep = self._create_timestep(obs, ep_done, teminated, rewards, info)
+        timestep = self._create_timestep(obs, step_type, teminated, rewards, info)

         return timestep

@@ -268,8 +268,9 @@ def step(self, action: list) -> TimeStep:
         obs, rewards, terminated, truncated, info = self.env.step(action)

         ep_done = np.logical_or(terminated, truncated)
+        step_type = np.where(ep_done, StepType.LAST, StepType.MID)

-        timestep = self._create_timestep(obs, ep_done, terminated, rewards, info)
+        timestep = self._create_timestep(obs, step_type, terminated, rewards, info)

         return timestep

@@ -289,12 +290,11 @@ def _format_observation(
         return Observation(**obs_data)

     def _create_timestep(
-        self, obs: NDArray, ep_done: NDArray, terminated: NDArray, rewards: NDArray, info: Dict
+        self, obs: NDArray, step_type: NDArray, terminated: NDArray, rewards: NDArray, info: Dict
     ) -> TimeStep:
         observation = self._format_observation(obs, info)
         # Filter out the masks and auxiliary data
         extras = {key: value for key, value in info["metrics"].items() if key[0] != "_"}
-        step_type = np.where(ep_done, StepType.LAST, StepType.MID)

         return TimeStep(
             step_type=step_type,  # type: ignore

From 245aeccce6b9aa7c711637eb8ad0c3008e6f6efb Mon Sep 17 00:00:00 2001
From: Louay Ben Nessir
Date: Tue, 12 Nov 2024 22:11:16 +0100
Subject: [PATCH 133/139] fix: smaclite win rate tracking

---
 mava/evaluator.py                   |  2 +-
 mava/systems/ppo/sebulba/ff_ippo.py |  2 +-
 mava/utils/make_env.py              |  2 +-
 mava/wrappers/gym.py                | 26 +++++++++++++++++---------
 4 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/mava/evaluator.py b/mava/evaluator.py
index a996d8d38..11a1f8f4a 100644
--- a/mava/evaluator.py
+++ b/mava/evaluator.py
@@ -285,7 +285,7 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]:

         timesteps = jax.tree.map(lambda *x: np.stack(x), *timesteps)

-        metrics = timesteps.extras
+        metrics = timesteps.extras["episode_metrics"]
         if config.env.log_win_rate:
             metrics["won_episode"] = timesteps.extras["won_episode"]

diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py
index 8470fe008..468957c46 100644
--- a/mava/systems/ppo/sebulba/ff_ippo.py
+++ b/mava/systems/ppo/sebulba/ff_ippo.py
@@ -141,7 +141,7 @@ def act_fn(
                     timestep.reward,
                     log_prob,
                     obs_tpu,
-                    timestep.extras,
+                    timestep.extras["episode_metrics"],
                 )
             )

diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py
index 3289db0d8..9d32112c9 100644
--- a/mava/utils/make_env.py
+++ b/mava/utils/make_env.py
@@ -78,7 +78,7 @@
 _gym_registry = {
     "RobotWarehouse": UoeWrapper,
     "LevelBasedForaging": UoeWrapper,
-    "SMAC": SmacWrapper,
+    "SMACLite": SmacWrapper,
 }

diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py
index feda920b7..594fdc7eb 100644
--- a/mava/wrappers/gym.py
+++ b/mava/wrappers/gym.py
@@ -110,7 +110,7 @@ def reset(
         agents_view, info = self._env.reset()

-        info = {"action_mask": self.get_action_mask(info)}
+        info["action_mask"] = self.get_action_mask(info)
         if self.add_global_state:
             info["global_obs"] = self.get_global_obs(agents_view)

@@ -119,7 +119,7 @@ def reset(
     def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]:
         agents_view, reward, terminated, truncated, info = self._env.step(actions)

-        info = {"action_mask": self.get_action_mask(info)}
+        info["action_mask"] = self.get_action_mask(info)
         if self.add_global_state:
             info["global_obs"] = self.get_global_obs(agents_view)

@@ -143,6 +143,13 @@ def get_global_obs(self, obs: NDArray) -> NDArray:
 class SmacWrapper(UoeWrapper):
     """A wrapper that converts actions step to integers."""

+    def reset(
+        self, seed: Optional[int] = None, options: Optional[dict] = None
+    ) -> Tuple[NDArray, Dict]:
+        agents_view, info = super().reset()
+        info["won_episode"] = info["battle_won"]
+        return agents_view, info
+
     def step(self, actions: List) -> Tuple[NDArray, NDArray, NDArray, NDArray, Dict]:
         # Convert actions to integers before passing them to the environment
         actions = [int(action) for action in actions]
@@ -181,9 +188,6 @@ def reset(
             "is_terminal_step": False,
         }

-        if "won_episode" in info:
-            metrics["won_episode"] = info["won_episode"]
-
         info["metrics"] = metrics

         return agents_view, info

@@ -199,8 +203,6 @@ def step(self, actions: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray, Di
             "episode_length": self.running_count_episode_length,
             "is_terminal_step": np.logical_or(terminated, truncated).all().item(),
         }
-        if "won_episode" in info:
-            metrics["won_episode"] = info["won_episode"]

         info["metrics"] = metrics

@@ -294,7 +296,12 @@ def _create_timestep(
     ) -> TimeStep:
         observation = self._format_observation(obs, info)
         # Filter out the masks and auxiliary data
-        extras = {key: value for key, value in info["metrics"].items() if key[0] != "_"}
+        extras = {}
+        extras["episode_metrics"] = {
+            key: value for key, value in info["metrics"].items() if key[0] != "_"
+        }
+        if "won_episode" in info:
+            extras["won_episode"] = info["won_episode"]

         return TimeStep(
             step_type=step_type,  # type: ignore

@@ -346,7 +353,8 @@ def async_multiagent_worker(  # CCR001
                 info,
             ) = env.step(data)
             if np.logical_or(terminated, truncated).all():
-                observation, _ = env.reset()
+                observation, new_info = env.reset()
+                info["action_mask"] = new_info["action_mask"]

             if shared_memory:
                 write_to_shared_memory(observation_space, index, observation, shared_memory)

From 649b93be9c62ad147e0f5a4bff8ff9de546f2010 Mon Sep 17 00:00:00 2001
From: Louay Ben Nessir
Date: Tue, 12 Nov 2024 22:39:18 +0100
Subject: [PATCH 134/139] Squashed commit of the following:

commit 6092dc656cd73ee5a0fb0dd6e29c50b11c9b84ac
Merge: 73537c5f 3ddcbff7
Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com>
Date:   Fri Nov 8 15:45:07 2024 +0200

    Merge pull request #1130 from instadeepai/fix/sable-pos-encoding

    fix: limit timestep-pos-encoding to rec-Sable

commit 3ddcbff74fe1fa221c037e9701a502fcd6c8aa64
Author: OmaymaMahjoub
Date:   Fri Nov 8 11:14:21 2024 +0000

    docs: update docs

commit daf1c199b4e2bdf0a9c012f6681d5fdb18781a25
Author:
OmaymaMahjoub Date: Fri Nov 8 11:11:45 2024 +0000 fix: controling timestep positional encoding in acting phase commit 73537c5f2294773fc73ba9e4f71203e13c97fc59 Merge: 905710fc d3631094 Author: Wiem Khlifi Date: Thu Nov 7 15:35:30 2024 +0100 Merge pull request #1126 from instadeepai/fix/mabrax fix: mabrax requirement commit d3631094feec5e8de3b3ff23382ac447414bb8fe Author: Sasha Abramowitz Date: Thu Nov 7 14:52:54 2024 +0200 fix: mabrax requirement commit 905710fc7d14e2567640268be72fc59835e31697 Merge: c86604c4 bb8e1073 Author: Omayma Mahjoub Date: Thu Nov 7 13:29:08 2024 +0100 Merge pull request #1113 from instadeepai/feat/sable Add Sable [Discrete actions] commit bb8e1073187cd9bd5ca5d4c04bbf385868ae9546 Author: Omayma Mahjoub Date: Thu Nov 7 11:05:38 2024 +0100 Update mava/systems/sable/anakin/ff_sable.py Co-authored-by: Sasha Abramowitz commit b3b43ec05ebed5205e465d2bad7f75dc5825baa2 Author: Omayma Mahjoub Date: Thu Nov 7 11:05:27 2024 +0100 Update mava/systems/sable/anakin/ff_sable.py Co-authored-by: Sasha Abramowitz commit 408c027e0e7366d539d163e36831764f323580e3 Author: Omayma Mahjoub Date: Thu Nov 7 11:05:20 2024 +0100 Update mava/systems/sable/anakin/rec_sable.py Co-authored-by: Sasha Abramowitz commit 3c250b838fe2ad7b6bac3e3ec770364aecf38c45 Author: Omayma Mahjoub Date: Thu Nov 7 11:04:23 2024 +0100 Update mava/networks/sable_network.py Co-authored-by: Sasha Abramowitz commit 18f7e662055f12519b4c5a6f3bbc54ea3e8bce16 Author: OmaymaMahjoub Date: Thu Nov 7 10:03:34 2024 +0000 feat: update decoder file by removing unnecessary functions commit a0daaebf80d3407c7e5c03389dcab6e2b9d0b2bd Author: OmaymaMahjoub Date: Thu Nov 7 09:52:32 2024 +0000 feat: update docs based on review Co-authored-by: Sasha Abramowitz commit 210faddc59c88a44a6a8c16e70e27767802c6116 Author: OmaymaMahjoub Date: Thu Nov 7 09:26:22 2024 +0100 fix: run pre commits commit 8546254ccb15febd92694b2a554bc7c8d08d9cbf Author: Omayma Mahjoub Date: Thu Nov 7 09:24:41 2024 +0100 Update mava/systems/sable/anakin/rec_sable.py Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit 5b7156ff75c248a49f817397ca00f9d77215172d Author: Omayma Mahjoub Date: Thu Nov 7 09:24:33 2024 +0100 Update mava/systems/sable/anakin/ff_sable.py Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit d3719baa79b26ff9e580968d90e7ee319bd6c374 Author: Omayma Mahjoub Date: Thu Nov 7 09:24:26 2024 +0100 Update mava/systems/sable/anakin/ff_sable.py Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit 011995d5cfd6287b7e9d25d1c049d128533d17a0 Author: OmaymaMahjoub Date: Wed Nov 6 16:18:04 2024 +0100 feat: move all system specific config setup to the system file commit e552509e9aac94b4941969fa6e9acb9f4d4282e3 Author: OmaymaMahjoub Date: Wed Nov 6 15:58:38 2024 +0100 feat: checkpointer hstate retoring fix commit bf58ded9038e79c544c3f82e419e71499270273b Author: OmaymaMahjoub Date: Wed Nov 6 14:58:38 2024 +0100 feat: move concat agents and time to jax utils commit e0ce8f42f16cedea4d20bac709520ff17f40bbf2 Author: OmaymaMahjoub Date: Wed Nov 6 14:56:19 2024 +0100 feat: get the positional encoding flag outside the util fn commit aa9cba8864b020c08d0effc1eafcfd9008108e1c Author: OmaymaMahjoub Date: Wed Nov 6 14:50:51 2024 +0100 feat: split encoder_decoder_Fn to two files commit 0030b356ad4bbb7248a8b330334af89b92c78cbc Author: OmaymaMahjoub Date: Wed Nov 6 14:45:49 2024 +0100 feat: use input hstate as the output variable instead of using extra hs variable commit 
d9432f4aad865b1495e7b9f9e538d4dad3c3bc77 Author: OmaymaMahjoub Date: Wed Nov 6 12:58:50 2024 +0000 feat: rename retentions to retention_heads commit 73ff86fca63bb7664ffe12dac91de0a407310e6f Author: OmaymaMahjoub Date: Wed Nov 6 12:45:54 2024 +0000 feat: replace init fn of sable net to get_actions one commit 3998b51a33aed49735dcf1ab9b335424cf1a1263 Author: OmaymaMahjoub Date: Wed Nov 6 12:37:48 2024 +0000 feat: send optimizer update fn directly without intermediate var commit 9f36fe6ac1fe333b661d4644c7f4468aff978a42 Author: OmaymaMahjoub Date: Wed Nov 6 12:35:18 2024 +0000 feat: move squeezing output of the net to inside the net fns commit a6370a97493415e2d34bf881bf9edeece3839180 Author: OmaymaMahjoub Date: Wed Nov 6 12:27:14 2024 +0000 docs: update some docs commit d80cf9186b111e56067cfb16272c57cca41aa9d0 Author: OmaymaMahjoub Date: Wed Nov 6 12:18:25 2024 +0000 feat: replace full attn flag by masking flag commit 5f214cef462cd968975380fd122c2e17d5a5574e Author: OmaymaMahjoub Date: Wed Nov 6 12:14:00 2024 +0000 feat: use the chunk size only to decide on use chunkwise flag for that commit b5d39934e21c244f4af31940f82c506c10146620 Author: Omayma Mahjoub Date: Wed Nov 6 12:58:47 2024 +0100 Update mava/networks/utils/sable/encoder_decoder_fns.py Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit 670de846ed4f9818ec9c3864e16b9448d1a3ab23 Author: Omayma Mahjoub Date: Wed Nov 6 12:58:27 2024 +0100 Update mava/networks/utils/sable/encoder_decoder_fns.py Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit de16e844f7e82aa20ba736e532bf1d44f70bf5c6 Author: OmaymaMahjoub Date: Wed Nov 6 11:56:59 2024 +0000 feat: rename training apply callable type to LearnerApply commit 8af3bb407a5780bd45dd99fc29ed4aae78efc4ff Author: OmaymaMahjoub Date: Wed Nov 6 11:54:52 2024 +0000 feat: addressing some renaming suggestions commit f0360d1341c21b2a3ae0d8401ee04d1fd95d7b78 Author: OmaymaMahjoub Date: Wed Nov 6 11:05:56 2024 +0000 chore: rename obs_carry to observation commit 57e3b517b376b88fb0a39d602023cd4e258b41a3 Author: OmaymaMahjoub Date: Wed Nov 6 10:45:20 2024 +0000 fix: renmaing the shape related to n_agents and actions_dim commit 437b8f62f35b988777d8c796ed43ad052f734707 Merge: e0c863c2 b3ac1d9b Author: OmaymaMahjoub Date: Wed Nov 6 10:36:46 2024 +0000 Merge branch 'feat/sable' of github.com:instadeepai/Mava into feat/sable commit e0c863c233676f791e105d0b22f0b3d187236de8 Author: OmaymaMahjoub Date: Wed Nov 6 10:35:57 2024 +0000 feat: update the action type to follow up same MAT standards Co-authored-by: Sasha Abramowitz commit b3ac1d9bca01d1a6e221d626147b3640b688c6f2 Merge: 7646a2f0 c86604c4 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Nov 6 12:15:45 2024 +0200 Merge branch 'develop' into feat/sable commit 7646a2f03d9ce2e255d142b30bfb7490aa8e97e7 Author: OmaymaMahjoub Date: Wed Nov 6 10:00:06 2024 +0000 fix: update timeout in workflow to 20 min Co-authored-by: Sasha Abramowitz commit 0dd0eab6a9ff823ad7508177b8bc4b7265cc1ccb Author: OmaymaMahjoub Date: Wed Nov 6 09:53:51 2024 +0000 feat: update shifting action method in autoregressive act Co-authored-by: Sasha Abramowitz commit 945937cc977db0e469f12ca3227d66c595491d08 Author: OmaymaMahjoub Date: Wed Nov 6 09:36:29 2024 +0000 feat: standardize the definition of net config to NamedTuple commit c86604c4a6232d6bafee99a6ebfa7693cd652ebe Merge: 7f2568a7 fb5c97c6 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Tue Nov 5 16:52:40 2024 +0200 Merge pull request 
#1120 from instadeepai/feat/vector-connector-wrapper Add vector connector wrapper commit fb5c97c61ac60d6484b2f51493feb10103b8d1ea Author: RuanJohn Date: Tue Nov 5 15:34:57 2024 +0200 chore: docstring commit d1a0c1c6406f95170268afb4c0b548b8ef177e08 Author: OmaymaMahjoub Date: Tue Nov 5 10:27:00 2024 +0000 feat: make intermediate line to calculate decay_matrix commit ae652fcefb7d29c7f8877556dcffc275cfeb4886 Merge: 1d8515e5 7f2568a7 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Tue Nov 5 10:19:46 2024 +0200 Merge branch 'develop' into feat/vector-connector-wrapper commit 7f2568a7a3944b6ca3195f6561ada55ee163d864 Merge: 3577523b b689a83e Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Tue Nov 5 10:19:22 2024 +0200 Merge pull request #1123 from instadeepai/chore/num-minibatches-assert Chore: Add asserts for number of envs divisible by number of minibatches commit b689a83e87044c1241dc29aa435fc7ba061336ce Merge: d555f21a 3577523b Author: Sasha Abramowitz Date: Tue Nov 5 10:01:53 2024 +0200 Merge branch 'develop' into chore/num-minibatches-assert commit eb625901cb3b3c1ed9a99ceaaa3111993c34f2ac Author: OmaymaMahjoub Date: Mon Nov 4 16:18:45 2024 +0000 fix: major fix of sending non zero hstate for autoregressive act commit 69f39a57712561b8a42f5da6671ac6576b80c6f0 Author: OmaymaMahjoub Date: Mon Nov 4 13:15:13 2024 +0000 feat: rename shape vars in encoder decoder fns file commit 7068a689cf8f7783043dc9b20a60a50e8fd39fa5 Author: OmaymaMahjoub Date: Mon Nov 4 10:19:20 2024 +0000 feat: merge the chunkwise and parallel fns into one commit 938541283de54f71084f83724d11f0ceb40dc3eb Author: OmaymaMahjoub Date: Mon Nov 4 07:50:22 2024 +0000 feat: move make eval fn to system files commit 75ced75c2f989e3221e83583bc0ad8c7097c93e4 Author: OmaymaMahjoub Date: Mon Nov 4 07:26:36 2024 +0000 feat: move sable util fns to network folder commit 1d8515e5a36e6b5d1a70ba04b821b6fc96d7019c Merge: fd276c0f 3577523b Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Nov 4 09:06:34 2024 +0200 Merge branch 'develop' into feat/vector-connector-wrapper commit 1d38c24f555405f154bff0e0bcc38f94b81b6923 Author: OmaymaMahjoub Date: Mon Nov 4 05:55:09 2024 +0000 feat: update checkpointer fn output types commit 2b80a7d8586fa2fed630519f58fd94b28f321b4c Author: OmaymaMahjoub Date: Sat Nov 2 18:09:22 2024 +0000 feat: update sable hstate attributes naming commit 584b0d4551921db42725c375b8872350cdf98dfc Author: OmaymaMahjoub Date: Sat Nov 2 18:03:10 2024 +0000 chore: update tree map commit dd21d04dfdec9843e401b91fc1a277ba9a78b9c2 Author: OmaymaMahjoub Date: Sat Nov 2 18:00:26 2024 +0000 chore: docs fixes in sable network file commit 30351515867db54556521316d990686a79a8343f Author: OmaymaMahjoub Date: Sat Nov 2 17:18:57 2024 +0000 chore: docs fixes in retention file commit 3577523b3a2f32bd35a2bf2e91f14a539a451e95 Merge: 327e0664 3373c579 Author: Wiem Khlifi Date: Sat Nov 2 14:27:05 2024 +0100 Merge pull request #1119 from instadeepai/fix/quickstart-notebook Fix quickstart notebook commit 3373c57929d7cfb7c3b40c0f0218716bcfa3a1f6 Author: WiemKhlifi Date: Fri Nov 1 17:06:31 2024 +0100 revert: point on develop for installation commit 65d1f2d3dc080816f10f8cec3882471a0013ba90 Merge: d866bd57 327e0664 Author: Wiem Khlifi Date: Fri Nov 1 16:31:29 2024 +0100 Merge branch 'develop' into fix/quickstart-notebook commit fd276c0f5b4df580a1d0a37282ed36c7669c852d Merge: 3f658ee5 327e0664 Author: Wiem Khlifi Date: Fri Nov 1 16:25:08 2024 +0100 Merge branch 'develop' into 
feat/vector-connector-wrapper commit d555f21aecb48aa2b8bf5545c24742aa46e51c1a Author: SimonDuToit Date: Fri Nov 1 17:04:39 2024 +0200 pre-commit commit b11fb37123971e220164b722ce26bef19e895de5 Merge: 63785093 327e0664 Author: SimonDuToit Date: Fri Nov 1 16:34:08 2024 +0200 Merge branch 'develop' into chore/num-minibatches-assert commit 327e0664fafbb3ba18ea6d1f8a48166c9106c5d4 Merge: 6eed2d2f 7944e41b Author: Wiem Khlifi Date: Fri Nov 1 15:32:45 2024 +0100 Merge pull request #1121 from instadeepai/feat/more-rware-scenarios More rware scenario configs commit 63785093873a0d21df2e3fa6b91b03516e1d0a16 Author: SimonDuToit Date: Fri Nov 1 16:32:28 2024 +0200 add asserts commit d866bd575ba205a0ba20bed931edb19ee4151e67 Author: Sasha Abramowitz Date: Fri Nov 1 16:24:55 2024 +0200 chore: update explainer text in example notebook commit 648337049aee98822458ceb41c371a77f62ff777 Author: Omayma Mahjoub Date: Fri Nov 1 10:31:19 2024 +0100 Update mava/configs/network/ff_retention.yaml Co-authored-by: Sasha Abramowitz commit aa8b455eadc7390a483b17316d0c41b28e6c77aa Author: Omayma Mahjoub Date: Fri Nov 1 10:31:03 2024 +0100 Update mava/configs/network/rec_retention.yaml Co-authored-by: Sasha Abramowitz commit 3f658ee50571cb08543d68c2883d2879191c3196 Author: Ruan de Kock Date: Thu Oct 31 17:48:33 2024 +0200 test: add vector connector to integration tests commit 69db3eb1851b6c9e20f3db71758e88d8b39312ab Author: Ruan de Kock Date: Thu Oct 31 17:10:45 2024 +0200 feat: separate env config for vector connector commit 1fdfce910f0dcc9ef44eb2c2c9607f7eb45c5762 Author: OmaymaMahjoub Date: Thu Oct 31 14:00:37 2024 +0000 fix: define decay scaling factor for ff sable before sending config to enc-dec Co-authored By: sash-a commit 283b6a9dc2f8587f6e0feb2ad70703d9a0fa5d32 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Thu Oct 31 15:09:03 2024 +0200 feat: use boolean masks instead of jnp.where Co-authored-by: Sasha Abramowitz commit ba52ce4f463a1101994ee079bdc69cc1296376c9 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Thu Oct 31 15:08:34 2024 +0200 chore: remove debug print statement Co-authored-by: Sasha Abramowitz commit 77f291cf032f842b9cfb36970ca6e5563d01c61f Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Thu Oct 31 15:08:04 2024 +0200 chore: comments for view shapes Co-authored-by: Sasha Abramowitz commit 7944e41b494558b6ded8e2227e9f50aff25784a6 Author: RuanJohn Date: Wed Oct 30 16:49:51 2024 +0200 feat: more rware scenario configs commit 3d36aab988aa36c2fc5e4cdbc7c46ce53e2a8b7d Author: RuanJohn Date: Wed Oct 30 15:58:58 2024 +0200 feat: add vector connector wrapper commit dc00782761f9f5e46d4804ba53b1dd8ebe4eec13 Author: OmaymaMahjoub Date: Wed Oct 30 13:15:05 2024 +0000 fix: fixing the training by adding causal masking of decoder for ff sable commit 2fb21c7c1bb9acf3a29048f154bc1b704e9ae989 Author: OmaymaMahjoub Date: Wed Oct 30 12:23:23 2024 +0000 feat: remove the sable net checker in simple retention commit 7732d52311f473fb698242530fa059b8213dbc8f Merge: ef32a219 6eed2d2f Author: OmaymaMahjoub Date: Wed Oct 30 09:48:04 2024 +0000 feat: merge develop branch commit eea913b8f6e3e6ec39776f714adcdc730b42e10f Merge: cc47103d 6eed2d2f Author: Wiem Khlifi Date: Wed Oct 30 10:23:16 2024 +0100 Merge branch 'develop' into fix/quickstart-notebook commit 6eed2d2fd3b27b55a0d86e7146609eb7b483d584 Merge: 389fbe58 ed3f015c Author: Wiem Khlifi Date: Wed Oct 30 10:22:04 2024 +0100 Merge pull request #1115 from instadeepai/feat/new-dockerfile feat: updated 
dockerfile commit ed3f015c5d35a0df38b5bd434750a7618c7fe0a1 Merge: 83fa5a9e 389fbe58 Author: Wiem Khlifi Date: Wed Oct 30 10:11:00 2024 +0100 Merge branch 'develop' into feat/new-dockerfile commit 389fbe586e9de425b87fe89ca75bd066849644e2 Merge: 25008fbc 0ec7049d Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 30 11:00:41 2024 +0200 Merge pull request #1107 from instadeepai/feat/implement-mat Add MAT commit 0ec7049d8bc91b87b3f221dfb35f3fe16a271770 Merge: 3d47bebb 25008fbc Author: Ruan de Kock Date: Wed Oct 30 10:03:16 2024 +0200 feat: merge in main commit 25008fbc5af4744be83c2e0ff007812718c27f60 Merge: 8b758133 936c0b8e Author: Sasha Abramowitz Date: Wed Oct 30 09:57:34 2024 +0200 Merge pull request #1105 from instadeepai/feat/hasac2 feat: hasac commit 3d47bebba6610c8d32c5107864831ee96be4d357 Author: Ruan de Kock Date: Wed Oct 30 09:25:23 2024 +0200 feat: swiglu documentation commit 7276aa0c93eb5a336281bb21282fd21af1314d41 Author: Ruan de Kock Date: Wed Oct 30 09:16:15 2024 +0200 feat: execution and training apply types commit d9358311ba4475c8b1df601cba330907cdcb2617 Author: Ruan de Kock Date: Wed Oct 30 09:09:55 2024 +0200 chore: rename embed dim commit a309bfa3b73056a441234238019553b82fe8b916 Author: Ruan de Kock Date: Wed Oct 30 09:03:42 2024 +0200 chore: remove obs dim in MAT network class commit 3cb460d405988a5a9c40d6b664bf1c675f348fc5 Author: Ruan de Kock Date: Wed Oct 30 08:53:45 2024 +0200 chore: config comments and reverts commit ef32a21947e92a9c34d0933f1bd2d308e5159b69 Author: OmaymaMahjoub Date: Tue Oct 29 15:31:37 2024 +0000 feat: compress net params in net_config commit 83fa5a9e2bba5f3a63565163fa9383afb8952685 Author: Sasha Abramowitz Date: Tue Oct 29 17:29:17 2024 +0200 chore: remove docker volumes from makefile Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit 936c0b8e5635a371a197cd1d256f2a181445fc59 Merge: cf45f98e 8b758133 Author: Sasha Abramowitz Date: Tue Oct 29 17:25:18 2024 +0200 Merge branch 'develop' into feat/hasac2 commit cc47103d305516203ea0143c7ed96be59331172d Author: Ruan de Kock Date: Tue Oct 29 15:17:36 2024 +0200 chore: remove notebook restarting cells commit 975df5fd8ec2b9c591e325ac32061dd54d4f60a1 Author: Ruan de Kock Date: Tue Oct 29 14:50:37 2024 +0200 docs: mention that we use python 3.10 on colab commit 4376b14a425f8355c89cfd5f392fb9ef919743c9 Author: Ruan de Kock Date: Tue Oct 29 14:48:49 2024 +0200 temp: change dir to quickstart notebook for reviewing commit 71f572cb70efa725ec3e94a2d86fe06a9e8cd878 Merge: 19731683 8b758133 Author: OmaymaMahjoub Date: Tue Oct 29 12:32:20 2024 +0100 merge develop commit 19731683b99e9bec89afff3124c8dd9dd90faa0e Author: OmaymaMahjoub Date: Tue Oct 29 12:24:41 2024 +0100 feat: prevent decay matrix calculation in case of ff sable commit 742903cac149b6ad8b0513961d15bb883c0cb68a Author: OmaymaMahjoub Date: Tue Oct 29 11:46:07 2024 +0100 fix: fixing the retention output indexing commit 2f9dd4edb08a30d8c55cb965079ccca33e1d73d4 Author: Ruan de Kock Date: Tue Oct 29 10:30:02 2024 +0200 fix: update quickstart notebook commit e8b7f57912037214a80c066ac386b9c373364f8f Author: Ruan de Kock Date: Tue Oct 29 09:18:38 2024 +0200 feat: update pyproject commit 8a11bcf1e74193b783feeb834fbd0b2c64309f49 Merge: 5424c663 8b758133 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 19:45:37 2024 +0200 Merge branch 'develop' into feat/new-dockerfile commit cf45f98e7df00906d2abccbd6463e61ef43b6ad0 Author: Sasha Abramowitz Date: Mon Oct 28 19:10:42 2024 
+0200 chore: docs commit 7c8b91964e39edddfda078835ee5f7bfba80927e Author: Sasha Abramowitz Date: Mon Oct 28 19:01:57 2024 +0200 chore: docs Co-authored-by: Omayma Mahjoub commit bd4c8bcc32a696e8e268ac5e36ceea44d8d7ea3d Author: Ruan de Kock Date: Mon Oct 28 17:33:41 2024 +0200 chore: pre-commit commit f3c990e222256d8aa8d1630195012e704b33b2bd Merge: fc2b2bd5 8b758133 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 17:08:29 2024 +0200 Merge branch 'develop' into feat/implement-mat commit 8b758133056e86303ab1acbe5aa2ade02e0f6e70 Merge: 54d3b50a 755b4600 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 17:07:26 2024 +0200 Merge pull request #1106 from instadeepai/feat/merge-qmix Add QMIX commit 755b4600db94fe79da7192ad33cda62025d1f9e0 Author: Ruan de Kock Date: Mon Oct 28 16:43:07 2024 +0200 chore: remove type hint commit 880698c203b40c3e9b995ac6b09334856e5d642f Merge: 3c81350f 54d3b50a Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 16:41:40 2024 +0200 Merge branch 'develop' into feat/merge-qmix commit 54d3b50abaa833d805244dc62cf5a9f909948b6a Merge: 87354a38 e9ff8b87 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 16:41:18 2024 +0200 Merge pull request #1109 from instadeepai/feat/pyproject-toml feat: switch to pyproject.toml commit fc2b2bd57b45d810829ce4bf7a702e29c685a5c0 Author: Ruan de Kock Date: Mon Oct 28 16:40:05 2024 +0200 chore: set correct number of keys commit 123f5b19360f07a097abc62eb6b1ea18206d5d79 Author: Ruan de Kock Date: Mon Oct 28 16:36:30 2024 +0200 chore: better action encoder init commit e9ff8b87007e030c5329a8b0413799e7cc8e21dd Author: Sasha Abramowitz Date: Mon Oct 28 16:28:08 2024 +0200 chore: strict zip commit 3cb5bcd9e84ebe47f43ac1780470c0116ad25fb3 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 16:19:00 2024 +0200 chore: add dim on new line Co-authored-by: Sasha Abramowitz commit 1b4cdea028e2e847dbb2a582939651c416b05bd3 Author: Sasha Abramowitz Date: Mon Oct 28 16:18:27 2024 +0200 chore: strict zip Co-authored-by: Wiem Khlifi commit 7f7b2b514a53e914d476e15babfead11b2b9e058 Author: Ruan de Kock Date: Mon Oct 28 15:52:21 2024 +0200 feat: type hint jaxmarl and gigastep env commit 5424c663649b463a3e606c39742c97f32b617116 Author: Sasha Abramowitz Date: Mon Oct 28 15:51:13 2024 +0200 chore: uppercase AS in Dockerfile Co-authored-by: Wiem Khlifi commit 3ecd7723c272c1dd7597d6829d96a5d5948c53e0 Merge: 41467f82 87354a38 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 15:22:27 2024 +0200 Merge branch 'develop' into feat/pyproject-toml commit 65538d5377c1ff9bc5e8661b3fb6beb273e613a0 Merge: 3b686481 87354a38 Author: Sasha Abramowitz Date: Mon Oct 28 15:15:18 2024 +0200 Merge branch 'develop' into feat/implement-mat commit 2cea286ffb41009291262f150b3285184a0f83d6 Merge: 9682bb29 87354a38 Author: Sasha Abramowitz Date: Mon Oct 28 14:09:33 2024 +0200 Merge branch 'develop' into feat/hasac2 commit 9682bb294d592598084b79fd6a909fc7dad3101b Author: Sasha Abramowitz Date: Mon Oct 28 14:09:16 2024 +0200 chore: shape comments commit 1237117e9073beb8ebd7aa92bbe3e76c72d527c3 Author: Sasha Abramowitz Date: Mon Oct 28 14:02:46 2024 +0200 chore: shape comments Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit 87354a38fef29d02b21b980a97271412244a791c Author: Wiem Khlifi Date: Mon Oct 28 12:03:53 2024 +0100 fix: fix logging during evaluation for JaxMARL envs 
(#1116) Co-authored-by: Sasha Abramowitz commit 5aa0c30d4496e8b1d20211c7f8b6662e4c073b35 Merge: 3ff88416 3d541f2d Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 12:18:20 2024 +0200 Merge branch 'develop' into feat/hasac2 commit 3b686481b9b43567e4721c3e58f70794c63c85b2 Merge: 9334319f 3d541f2d Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 12:15:20 2024 +0200 Merge branch 'develop' into feat/implement-mat commit 3c81350ff70d04b32539345b9cbb48916cad30e7 Merge: e49a22f7 3d541f2d Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 12:14:54 2024 +0200 Merge branch 'develop' into feat/merge-qmix commit 3d541f2d85797678da8f154d81199112eacf8f09 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 12:13:41 2024 +0200 Fix lbf and rware obs spec types (#1114) * fix: lbf and rware obs spec types * fix: fix obs spec type in gigastep commit 9334319fcdca7a46a63cd4deb091c18f71c8d7ea Author: Ruan de Kock Date: Mon Oct 28 12:10:18 2024 +0200 chore: more lightweight network configs commit e49a22f7fe633af72e63246cfc4e1bb6f0c751e6 Author: Ruan de Kock Date: Mon Oct 28 12:05:37 2024 +0200 chore: shape comments legend commit f11c21ec515f2dcc04eb91b10aae68afc018e402 Author: Ruan de Kock Date: Mon Oct 28 11:58:21 2024 +0200 chore: corect shape names in the comments commit 36f54d1846bddba918d0c6cd8fd2bf637dcd5122 Author: Sasha Abramowitz Date: Mon Oct 28 11:56:47 2024 +0200 feat: udpated dockerfile commit 98378f3f1554f67e34f00b3f78609afc3a083b73 Author: Ruan de Kock Date: Mon Oct 28 11:40:56 2024 +0200 feat: add MAT network config type commit aff9feb11ed4a84be3558324b9d64a78845369c6 Author: Ruan de Kock Date: Mon Oct 28 11:30:58 2024 +0200 feat: use network for MAT network init commit 66884fb88b31868461b898066102002837edb5bf Author: Ruan de Kock Date: Mon Oct 28 09:39:29 2024 +0200 test: add mat to integration tests commit 738ec3c7049cdfce8a8f205b147e789ad922d9cd Author: Ruan de Kock Date: Mon Oct 28 09:29:15 2024 +0200 feat: add qmix to intergration tests commit c620e17f3a784bea3b1f65d5ba8a79cc3b0be036 Merge: c00f54fd cd31e205 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Mon Oct 28 09:25:03 2024 +0200 Merge branch 'develop' into feat/implement-mat commit 5f3f8e031945bba018d23888e0b9af59951c1c94 Author: Ruan de Kock Date: Mon Oct 28 09:20:31 2024 +0200 chore: rename data variables in training commit fc091890c91f1887cb3087101f01038a04e4cc2a Merge: c80da623 cd31e205 Author: Ruan de Kock Date: Mon Oct 28 09:06:30 2024 +0200 chore: merge in main commit a6ae60296d80ee3d4e5d9b3d486dcfe88e46937e Author: OmaymaMahjoub Date: Sun Oct 27 17:27:21 2024 +0100 fix: minor documentations edits commit 3ec23e80308c95a263f770b3d793bdf82db575f6 Merge: 8c56da70 cd31e205 Author: Omayma Mahjoub Date: Sun Oct 27 17:17:51 2024 +0100 Merge branch 'develop' into feat/sable commit 8c56da7090303181bc546398b012f5795047b480 Author: OmaymaMahjoub Date: Sun Oct 27 16:16:57 2024 +0000 feat: checkpointer update based on MAT PR commit 7601bba98f491ed448f6c1dbf5b545d305b0368d Author: OmaymaMahjoub Date: Sun Oct 27 15:55:54 2024 +0000 feat: add sable to the integration test commit 8abc50104dd0b1df4e487d80217b7d9165a9e20f Author: OmaymaMahjoub Date: Sun Oct 27 15:25:28 2024 +0000 fix: fixing the apply fn output ordering commit 78f99c9fb1d82e642e04af135f63d293a9bebdbf Author: OmaymaMahjoub Date: Sat Oct 26 16:32:24 2024 +0100 fix: minor updates to net config commit 
c80da6236578e3b2421eedb953de2369d95e59b7 Author: Ruan de Kock Date: Fri Oct 25 18:12:36 2024 +0200 fix: correct spec typing in lbf and rware commit cd31e2056f3a0e59bf31118c11cc53742fb9eb1d Author: Sasha Abramowitz Date: Fri Oct 25 17:23:28 2024 +0200 feat: smaller networks for tests (#1111) * feat: smaller networks and new way to modify test config * feat: faster find_replace Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Co-authored-by: Wiem Khlifi * refactor: move find_replace to test/utils.py * chore: pre-commit --------- Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Co-authored-by: Wiem Khlifi commit 41467f82df0339b609b2da40c62806d7e5443939 Author: Sasha Abramowitz Date: Fri Oct 25 16:54:52 2024 +0200 fix: add unwrapped method to gigastep and jaxmarl wrappers commit 3ff88416e649371081a8e26af2b8f64ad49f4994 Author: Sasha Abramowitz Date: Fri Oct 25 16:38:48 2024 +0200 chore: pre-commit commit 617504bd178e0e9bef476ff1beb7d90256289ad0 Author: Sasha Abramowitz Date: Fri Oct 25 16:27:47 2024 +0200 chore: shape and global state comments commit cb6bb68e092e45b32d0a810400359ca0463f89c6 Author: Ruan de Kock Date: Fri Oct 25 12:00:20 2024 +0200 fix: increase sample sequence length in testing config commit 9a4fcbc6cc23bec3069baf38a0b0dc1d6289af18 Merge: 3b6bd930 bc6eb1a9 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Fri Oct 25 11:22:50 2024 +0200 Merge branch 'develop' into feat/hasac2 commit 3043a9d0c3da871f24efb058ebee01da06a71a40 Merge: 3c4ea141 bc6eb1a9 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Fri Oct 25 11:20:10 2024 +0200 Merge branch 'develop' into feat/pyproject-toml commit f1549d19a6f90bbd7aa9d226c36be68fffca22b9 Merge: a2d4215a bc6eb1a9 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Fri Oct 25 11:19:22 2024 +0200 Merge branch 'develop' into feat/merge-qmix commit c00f54fd84c76cc8b6d6e57359370f07d3cef9b4 Merge: ee3aff6a bc6eb1a9 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Fri Oct 25 11:19:10 2024 +0200 Merge branch 'develop' into feat/implement-mat commit a2d4215aeab8e45b6389de9721638b9a0f90ebc1 Author: Ruan de Kock Date: Fri Oct 25 11:17:46 2024 +0200 chore: reset config defaults commit dfdfd3232ec7d8a10ac82eaec47346277c0de910 Author: Ruan de Kock Date: Fri Oct 25 11:15:35 2024 +0200 chore: rename performance variable commit aae973d1758899852e502598d245cffcff09b626 Author: Ruan de Kock Date: Fri Oct 25 11:09:01 2024 +0200 chore: rename data_first and and data_next commit 3c4ea141680341c27956fbb78dfa7049d76066df Author: Sasha Abramowitz Date: Fri Oct 25 11:04:24 2024 +0200 chore: typo Co-authored-by: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> commit 5df0e1e41420d01260d0129871d48673ffd04142 Author: Ruan de Kock Date: Fri Oct 25 11:02:49 2024 +0200 chore: change comment in configs about sequence length commit bc6eb1a9564cac9ffc861fe3e3ce34cd423ea8ad Merge: 57c8e640 dfef2387 Author: Sasha Abramowitz Date: Fri Oct 25 11:00:55 2024 +0200 Merge pull request #1112 from instadeepai/feat/github-actions-uv feat: uv for github actions commit dfef2387f37331e94daa3547eba6ff3a173adaca Author: Sasha Abramowitz Date: Fri Oct 25 09:41:39 2024 +0200 chore: pre-commit autoupdate commit d221a85656ad7f5d592f3a05348d137e40ae62d7 Author: Sasha Abramowitz Date: Fri Oct 25 09:33:10 2024 +0200 feat: uv for github actions commit ee3aff6a54d62ed1fd96215a0a818d137de8dcc1 Author: Ruan de Kock Date: Thu Oct 24 18:04:41 2024 +0200 feat: use 
model params and optimiser state directly instead of named tuples commit f205b9edc2bcdf1c3188ef016b094d7d95bc6d72 Author: Ruan de Kock Date: Thu Oct 24 16:57:35 2024 +0200 feat: use .at[].set() with drop instead of jax.lax.cond to update shifted actions commit 26654b8a77b6fdfdbfde76ecf8bcd6d2a24cbaab Author: Ruan de Kock Date: Thu Oct 24 16:32:51 2024 +0200 feat: use make mlp method commit 91391c7a8d01b9748ce5c5447bfb3df6825a8fcf Author: Ruan de Kock Date: Thu Oct 24 16:04:45 2024 +0200 chore: output projection commit 32e458ae1185f59567ece1827d8f2fd32230ac9a Author: Ruan de Kock Date: Thu Oct 24 16:03:15 2024 +0200 chore: use capital letters for dimensions commit eee0217b552eb0c010640926db5850f5ef7c19d9 Author: Ruan de Kock Date: Thu Oct 24 15:48:23 2024 +0200 chore: todo about using einops in the future commit 20a10f5515fbc42929df0c036bb119800197237e Author: Ruan de Kock Date: Thu Oct 24 15:25:59 2024 +0200 feat: rename dimensions commit aae87cdbe8e0dd45f43fc1f7c3a1f4cf01c3ce41 Author: Ruan de Kock Date: Thu Oct 24 15:10:55 2024 +0200 chore: pass in less seeds commit 2fc8b929fa21f8fe42219e32ccde1c564df043dd Author: Ruan de Kock Date: Thu Oct 24 14:47:35 2024 +0200 feat: split less keys commit b678bf270d44251901fbf23500200ebe8c589b3d Author: Ruan de Kock Date: Thu Oct 24 14:35:52 2024 +0200 chore: linter commit 80711fd0ada3a5b17252b4d30da7b633cfb198f5 Author: Ruan de Kock Date: Thu Oct 24 14:34:30 2024 +0200 feat: pass in full observation object to network commit 2bd4e2ca31c416a11f5ba1a63b9f592d37084b7b Author: Sasha Abramowitz Date: Thu Oct 24 14:30:46 2024 +0200 feat: switch to pyproject and update mypy rules commit 33117027e998315d6acac9ffd3c86e4b479c05c3 Author: Ruan de Kock Date: Thu Oct 24 14:03:47 2024 +0200 chore: use marlenv type commit db10ce4b005e204c911f104d1cae6017d05852f7 Author: Ruan de Kock Date: Thu Oct 24 14:01:59 2024 +0200 chore: don't check action space type on strings commit eedc8d75aa82d62397ce79b1a0068658b7423c4f Author: Ruan de Kock Date: Thu Oct 24 13:56:45 2024 +0200 chore: rename v_loc to value commit 3688e4021085cc5ba7832904db3a089d27c9cbcd Author: Ruan de Kock Date: Thu Oct 24 13:53:52 2024 +0200 chore: move SwiGLU network to torsos file commit 5e2bbb580ff35786922c38b0a45b2a9d75021be1 Author: Ruan de Kock Date: Thu Oct 24 13:44:24 2024 +0200 chore: expand mask dims without reshape commit 8888a5c96bf926ae484fca1ad41567321ede5203 Author: Ruan de Kock Date: Thu Oct 24 13:36:09 2024 +0200 chore: remove old comments commit 3b6bd93065d9a65befb74653ce4997058ac6b6f5 Author: Sasha Abramowitz Date: Wed Oct 23 17:23:55 2024 +0200 chore: minor fixes from PR review commit 2607db4ef4aec7ea25833dff56996392fcf6c594 Author: Sasha Abramowitz Date: Wed Oct 23 16:16:30 2024 +0200 fix: small logger bug for arrays with a single element commit 388dc6a9f13fd2378a1bf6df122c09779139bf45 Author: Sasha Abramowitz Date: Wed Oct 23 16:16:14 2024 +0200 chore: update default hasac config commit 35db17ddf5eac51826e9ad851114a59587a5c979 Author: Ruan de Kock Date: Wed Oct 23 16:01:08 2024 +0200 chore: slightly more lightweight configs and comment clean up commit 7d5e2393323307580d65161867836580082b2c93 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 15:53:22 2024 +0200 Update mava/configs/system/q_learning/rec_qmix.yaml Co-authored-by: Sasha Abramowitz commit a7e3734958f45919e2346e1ffc06699d5ea7b591 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 15:53:12 2024 +0200 Update mava/configs/env/smax.yaml Co-authored-by: Sasha 
Abramowitz commit 4fcce3fbdbb0c7868b666ac9995588be8d652f9f Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 15:53:01 2024 +0200 Update mava/configs/system/q_learning/rec_qmix.yaml Co-authored-by: Sasha Abramowitz commit ba71cc58998241eee741bb25d3ede885ead3ad2e Author: Ruan de Kock Date: Wed Oct 23 15:51:17 2024 +0200 chore: fixed update_fn return type commit f6f81e41b4c4b46a5bc176b7019e86860627dcdf Author: Ruan de Kock Date: Wed Oct 23 15:49:46 2024 +0200 feat: paramterise learner state with qmix and qlearning params commit 4f2076b3c9086667eae1750373fd9b1866167c7c Author: Ruan de Kock Date: Wed Oct 23 15:39:30 2024 +0200 feat: store q_error and reuse when logging commit bde58fd30547eab49cbc3eb3e5c6972a04b7237b Author: Ruan de Kock Date: Wed Oct 23 15:35:10 2024 +0200 chore: comment clean up and variable renaming commit 2dcaaceb301c41091a287a5669ea63df3487c7ff Author: Ruan de Kock Date: Wed Oct 23 14:55:34 2024 +0200 chore: whitespace removed commit a19d5fa684b86d69e82512522fc59a7d56a1f02f Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:52:02 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit cce233a2fc19a4ef914c69538e108c90259a8ee8 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:51:03 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit 9294ee859c28acdaf8698a6ad01f3eb3589fda38 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:49:02 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit d8d80f741fa758aa9bc71a7aeb9026b0e3f44f6b Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:48:26 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit 59fe5e2d145b13c839399efb6a6b9606eb1570e8 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:47:41 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit 448495bb25fbe2dcdf3f82b6ed7655a9ce6bc045 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:45:48 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit fece034aa964792f027380eb587f4f72951569d6 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:44:32 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit 9c4aea55a0e111109b1c9f7f2641eccbe22fa68d Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:24:03 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit 7482c8f47d651e10dd1cccb800f726cfa30d8ba9 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:23:35 2024 +0200 Update mava/systems/q_learning/anakin/rec_qmix.py Co-authored-by: Sasha Abramowitz commit f10b2953ad57b5319c33a53a849c6bf150b6b825 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:23:25 2024 +0200 Update mava/networks/torsos.py Co-authored-by: Sasha Abramowitz commit 79f9858ca6cbd81e4e1b5f9acba83a666e3a0d89 Author: Ruan de Kock <33461981+RuanJohn@users.noreply.github.com> Date: Wed Oct 23 14:21:37 2024 +0200 Update mava/configs/system/q_learning/rec_qmix.yaml Co-authored-by: Sasha Abramowitz commit 
034859e6a59c5397552847572e6af884a180e076 Author: Ruan de Kock Date: Wed Oct 23 09:18:26 2024 +0200 chore: match exact rec_iql style commit 73c4611144457a0359c8cdb4990461c39fd9fdc8 Author: OmaymaMahjoub Date: Tue Oct 22 17:22:20 2024 +0100 feat: fix pre commits commit 576d5d4b37a92fd48fe6212a82cb0397722606a0 Author: OmaymaMahjoub Date: Tue Oct 22 14:15:23 2024 +0100 feat: remove parallel representation commit 067b4ef33d76d0fdd4a7afcad607d6f7913a6a14 Author: Ruan de Kock Date: Tue Oct 22 14:49:42 2024 +0200 feat: chore pre-commit commit b131cb7425e2114cd7fa8efbfe6d9356bb3afe11 Author: Ruan de Kock Date: Tue Oct 22 14:46:13 2024 +0200 chore: add license commit 61d70ca734d8ab426c9962a4daff57a58bbd6b89 Author: Ruan de Kock Date: Tue Oct 22 14:42:31 2024 +0200 chore: remove unused network file commit 8357ef57c942d50b79f88ed1ccd2b6af197f556a Author: Ruan de Kock Date: Tue Oct 22 14:01:01 2024 +0200 chore: duplicate whole info dict at the same time commit 10647450c9af5b1c8f20f2f2c29648f747257bdc Author: OmaymaMahjoub Date: Tue Oct 22 12:51:41 2024 +0100 feat: clean rec sable system file and fix checkpointer commit 63723c7da531727acfa0e5967c5ffe1ea2aa1ff9 Author: Ruan de Kock Date: Tue Oct 22 12:44:02 2024 +0200 chore: set correct MLP torso size in configs commit 59972c1e95e8ea1affd0781509ed9f3e96bc4a30 Author: OmaymaMahjoub Date: Tue Oct 22 11:37:41 2024 +0100 feat: add util fns for acting and training plus support for chunkwise commit f0dbc65dfe174cd1ff9ee181cd5247c478187448 Author: Ruan de Kock Date: Tue Oct 22 12:35:20 2024 +0200 chore: replace jax.tree_map with jax.tree.map commit 3fb530438f46009c336e225b8c09696821df0495 Author: Ruan de Kock Date: Tue Oct 22 12:31:30 2024 +0200 feat: instantiate networks with hydra utils commit b7353346133df7525f760cb2dda9759f8550cbbc Author: Ruan de Kock Date: Tue Oct 22 10:27:10 2024 +0200 chore: extra comment on term_or_trunc vs terminal commit 63eb99f362fdcebdffc2f669e0ff71bfb45fc90b Author: Ruan de Kock Date: Tue Oct 22 10:24:36 2024 +0200 feat: add option for hard or soft target updates commit 64325d7c5715a29e083d4cf3b7f60fc36b197e16 Author: Sasha Abramowitz Date: Tue Oct 22 10:23:05 2024 +0200 chore: add hasac test commit 5fcfcf2d66ac4e90faa3636dcdc03cd2dd944737 Author: Sasha Abramowitz Date: Tue Oct 22 10:21:46 2024 +0200 chore: add system name commit e13a6e15c698f76f694d94c5a5c0200ea4ba82db Author: Ruan de Kock Date: Tue Oct 22 10:10:48 2024 +0200 chore: type hints commit 3b8d76195d23f45b0954849cd7c04b19929299b0 Author: Ruan de Kock Date: Tue Oct 22 09:53:38 2024 +0200 chore: clean up comments commit 928c9c55fbfdba2c1998280b7d430307680fea4c Author: Sasha Abramowitz Date: Tue Oct 22 09:50:58 2024 +0200 fix: jax utils commit 4915b97e29e7817a0c07ae1e2035bdd0ac6dd72c Author: Ruan de Kock Date: Tue Oct 22 09:45:27 2024 +0200 chore: move types to qlearning types file commit e3195becaf47aec252168e5b6ed0dedb63277a29 Author: Ruan de Kock Date: Tue Oct 22 09:36:37 2024 +0200 chore: move torso and qmix network files commit cefe4da21828c223b73e2eae2cf0d575c87efc8f Author: Ruan de Kock Date: Tue Oct 22 09:19:20 2024 +0200 feat: replace rec_qmix code commit 11546a22d757b095882c961de02cd2b81590b3f4 Merge: 97e23cfe 57c8e640 Author: Ruan de Kock Date: Tue Oct 22 09:15:28 2024 +0200 Merge branch 'develop' into feat/merge-qmix commit 8d85d323004d6e66af4d06a25bef65cf8d985cbd Author: Ruan de Kock Date: Mon Oct 21 17:48:31 2024 +0200 feat: move decoding functions to network utils commit fd09d59704e4a2bd5705a26b091a9523b78ef931 Author: Ruan de Kock Date: Mon Oct 21 16:05:21 
2024 +0200 feat: use get_action_head util instead of manually setting action space type commit 6fe1f9c4782b2afd8caa7210fdf2beaa7b528e5a Author: OmaymaMahjoub Date: Mon Oct 21 14:54:31 2024 +0100 feat: add chunkwise timestep fn to the modular net commit 649a70ff6dedb31c1345ba0d1e3d729dfbd11bb3 Merge: 4e3bf428 57c8e640 Author: Ruan de Kock Date: Mon Oct 21 15:41:09 2024 +0200 Merge branch 'develop' into feat/implement-mat commit 519025b705b0a572acebc91801e277511d85b617 Author: Sasha Abramowitz Date: Mon Oct 21 15:22:22 2024 +0200 chore: update config to new mava and cleanup commit f03e6ca79a489dbe8c9a6cf1cf394c81ab59bbdf Author: OmaymaMahjoub Date: Mon Oct 21 14:17:06 2024 +0100 feat: modular net sable commit 0eeaa58ad89073cc51fd64092ab595b6d3a349a5 Merge: eeda7f50 57c8e640 Author: Sasha Abramowitz Date: Mon Oct 21 14:51:10 2024 +0200 Merge branch 'develop' into feat/hasac2 commit 57c8e64059bd59005d80c1c8278eef65855253b9 Merge: c4e40ce2 a14cfb2f Author: Wiem Khlifi Date: Mon Oct 21 13:25:03 2024 +0100 Merge pull request #1104 from instadeepai/feat/act_head feat: set the action head automatically commit eeda7f508e13d1c291dacdd41c5efe074761f4eb Merge: f19d9bc6 c4e40ce2 Author: Sasha Abramowitz Date: Mon Oct 21 13:53:07 2024 +0200 Merge branch 'develop' into feat/hasac2 commit a14cfb2fc3ca7b979a27e4076d168970789dad63 Author: WiemKhlifi Date: Mon Oct 21 11:31:04 2024 +0100 feat: return action type with act head commit 97e23cfe0f8632960f5a61dd29a3e3093916f899 Author: Ruan de Kock Date: Mon Oct 21 11:40:26 2024 +0200 feat: follow old qmix in trainer commit f1cb0f20fd1fcc8871539a18803fa3b61d7b6979 Author: WiemKhlifi Date: Fri Oct 18 17:17:49 2024 +0100 feat: use action_sepc to select action head type commit 59d354fab785db0c9bc761498e0770880e053835 Author: OmaymaMahjoub Date: Fri Oct 18 16:41:42 2024 +0100 feat: add only timestep positional encoding commit 60d8ffa9d7d30829c79692145014a58a69017156 Author: WiemKhlifi Date: Fri Oct 18 15:30:32 2024 +0100 fix: update ff_ippo_store_experience file commit 8ac214a208619aca95763df15c427e8dc140db2a Author: WiemKhlifi Date: Fri Oct 18 15:07:28 2024 +0100 feat: set the action head automatically based on env name commit 4e3bf428f480dc4b801f153e8b6c7e6d7a59273d Author: Ruan de Kock Date: Fri Oct 18 14:21:05 2024 +0200 feat: infer batch size and num agents from obs rep instead of manually passing in commit c18e2339c203138ed429e609d9704b8f05a473a6 Author: Ruan de Kock Date: Fri Oct 18 14:04:04 2024 +0200 chore: comment cleanup commit 5e233b0cc8ff49db49743de3e4706159d7dd4072 Author: Ruan de Kock Date: Thu Oct 17 18:42:44 2024 +0200 feat: continuous actions training commit 562c82a22011f5c1988f77dea935a5e90753eca9 Author: Ruan de Kock Date: Thu Oct 17 18:17:19 2024 +0200 feat: pass key through trainer to prepare for continuous action spaces commit b08388dc261dd9d1c64dc518c14f9570a5ab05b2 Author: Ruan de Kock Date: Thu Oct 17 18:02:43 2024 +0200 feat: squeeze inside of network and not in system run file commit 42b48bb69c70f9a6101afc1f120fd8d2495c1a01 Author: OmaymaMahjoub Date: Thu Oct 17 16:51:15 2024 +0100 fix: minor fix to the positional encoding for timestep commit 13e42a7fe3e4390a3a46f3168f18e16b3d7ac087 Author: OmaymaMahjoub Date: Thu Oct 17 16:28:10 2024 +0100 feat: timestep encoding for rec sable commit e4bc9692667580b3fc9e1a70d9b1fa688f2e1b06 Author: Ruan de Kock Date: Thu Oct 17 16:53:25 2024 +0200 feat: use jax.tree.map instead of deprecated jax.tree_map commit 2d4f7edba23d18ec1ba726e7a0745f3543d479b3 Author: Ruan de Kock Date: Thu Oct 17 16:44:30 2024 
+0200 chore: remove redundant obs being passed around commit 848c625cc455428fb4b35c5b43d685beaa517612 Author: Ruan de Kock Date: Thu Oct 17 16:36:57 2024 +0200 feat: prepare to starting using mava discrete action head commit d589b7e3300b9ad15d838f635dd9f7014ae527a1 Author: OmaymaMahjoub Date: Thu Oct 17 12:20:29 2024 +0100 feat: pos encoding setup commit 3fab043212b90875f1f27bb9e2e76289f96e318f Author: OmaymaMahjoub Date: Wed Oct 16 14:59:17 2024 +0100 fix: fix args documentation for learner_fn commit b180ef2486613b8f37559b50e08482883b7ddb45 Author: OmaymaMahjoub Date: Wed Oct 16 14:06:15 2024 +0100 feat: add Sable non memory commit 8d7398a24e77c6b1e6ac8a46c57b1f47da9cd8e3 Author: OmaymaMahjoub Date: Wed Oct 16 12:35:57 2024 +0100 feat: add evaluator to sbale commit 094cc652adcb10dfa91233121655e6bd91ff3724 Author: OmaymaMahjoub Date: Wed Oct 16 11:39:11 2024 +0100 feat: update types used for sable commit b983cca0991a75fcdedc8a564af1e762bc68ec3f Author: OmaymaMahjoub Date: Tue Oct 15 14:55:06 2024 +0100 feat: minor update commit 38af5baa15259587b83a74553a34eba6007a1ad1 Merge: f44e6a5a c4e40ce2 Author: OmaymaMahjoub Date: Tue Oct 15 14:42:14 2024 +0100 feat: merge develop branch commit 4964fa8b36019ad76bc0fd02e75274b4ed126ea4 Merge: c8005cb3 666660b3 Author: Ruan de Kock Date: Tue Oct 15 14:57:59 2024 +0200 feat: merge in network refactor commit f44e6a5ab2164c2d918129e4d02a9613d698260d Author: OmaymaMahjoub Date: Tue Oct 15 13:54:39 2024 +0100 feat: run pre commits commit 6b928c76eda224f6c25be029da23b82c7d4775e2 Author: OmaymaMahjoub Date: Tue Oct 15 13:53:16 2024 +0100 feat: sable clean code and documentation (types still uncorrect commit c8005cb3c6e41260715de1b258b7296b409e68b8 Author: Ruan de Kock Date: Tue Oct 15 12:47:39 2024 +0200 feat: use tfp instead of distrax commit 53dd9d7e939af98bfc8b2dd6c5939d34ca5f41b0 Author: Ruan de Kock Date: Tue Oct 15 12:06:59 2024 +0200 feat: remove autoregressive scans commit ff5ec1030990a219922f35bea1d5289c7471e7a2 Author: Ruan de Kock Date: Tue Oct 15 10:25:26 2024 +0200 feat: use MAT types commit 84f0852088339bfc61c42bba04c601766cefeb85 Author: Ruan de Kock Date: Tue Oct 15 09:31:43 2024 +0200 feat: remove value norm commit 30d29477c3c2957527c8550c3871aac473012fd5 Author: Ruan de Kock Date: Tue Oct 15 09:19:51 2024 +0200 feat: remove huber loss commit 2905604b271fcb1fd8490cfc42382147366f0673 Author: Ruan de Kock Date: Tue Oct 15 09:05:30 2024 +0200 feat: add discrete MAT and training on rware commit 88a619ab21f5e79bb24be30ee7b29c945774331e Author: OmaymaMahjoub Date: Mon Oct 14 17:03:46 2024 +0100 feat: clean util functions commit efcd97528a38a1cdd34bed9613c186fd61a086e6 Author: OmaymaMahjoub Date: Mon Oct 14 16:19:54 2024 +0100 feat: rename sable memory to rec sable commit c15edb06e45b0149bd7d8f2684f10d0ea3845c6f Author: OmaymaMahjoub Date: Mon Oct 14 15:50:33 2024 +0100 feat: add trainable sable system (unclean) to mava commit 8b1860285fba4470d69d6b5646764c6aad477724 Author: Ruan de Kock Date: Mon Oct 14 15:42:20 2024 +0200 feat: set correct sequence length and reward dim in buffer init commit 72b00fdd9438d0afe998a121c475ea4b4893230c Author: OmaymaMahjoub Date: Mon Oct 14 14:31:04 2024 +0100 feat: run pre commits commit 95c12657e6f6b802f81a787a7f95758898aaec2f Author: OmaymaMahjoub Date: Mon Oct 14 14:20:59 2024 +0100 feat: add sable network file commit c7685edb31bd1a126f34f173506e2a15e7d900cb Author: Ruan de Kock Date: Fri Oct 11 15:41:51 2024 +0200 feat: qmix training with new API commit 1c2009308b2a8891913118bdc2875fa2d97d8482 Author: Ruan de 
Kock Date: Fri Oct 11 12:24:52 2024 +0200 feat: qmix piping through with distributional networks commit b2bd79a267589d9def756a94d21797e3b0730e64 Merge: 43f14e5e 2a1d2d8b Author: Ruan de Kock Date: Fri Oct 11 11:06:48 2024 +0200 feat: merge in develop commit 43f14e5e5a5341ed5f59904252b329f18c4d8e83 Author: Ruan de Kock Date: Fri Oct 11 11:05:53 2024 +0200 feat: qmix with new evaluator piping through commit 8d35f400b270ee23d9e5be05316b30a2ecd8a80b Author: OmaymaMahjoub Date: Thu Oct 10 11:26:57 2024 +0100 feat: add retention file commit e767bd90381a69f58179cc023991044c812c92e8 Author: OmaymaMahjoub Date: Wed Oct 9 12:22:37 2024 +0100 feat: move ff and rnn networks into a folder commit 09d5fdfbbf04aa16c18e1173655d5128cf0aeca7 Author: OmaymaMahjoub Date: Wed Oct 9 12:17:20 2024 +0100 feat: add config files of sable commit f19d9bc6d8a460817723520cefb2d3ea56bbc328 Author: Sasha Abramowitz Date: Wed Aug 7 13:42:53 2024 +0200 fix: optimizers for multiple parameters commit 4673da87ea6f88ac662ea69fa6633cb10b364072 Author: Sasha Abramowitz Date: Wed Aug 7 13:03:08 2024 +0200 feat: grad clip + fix final return commit aad6a0eefd45d9e5f6df75251e2434367cd0fd67 Author: Sasha Abramowitz Date: Wed Aug 7 12:16:04 2024 +0200 fix: evaluator working for hasac commit 658f6277f665b2b437f9ba091b1241cbb9f34d8a Author: Sasha Abramowitz Date: Wed Aug 7 11:46:21 2024 +0200 feat: hasac --- .dockerignore | 26 + .github/workflows/tests_linters.yaml | 28 +- .pre-commit-config.yaml | 8 +- Dockerfile | 48 +- Makefile | 17 +- README.md | 2 +- examples/Quickstart.ipynb | 390 ++++------ mava/__init__.py | 1 + .../ff_ippo_store_experience.py | 19 +- mava/configs/arch/anakin.yaml | 2 +- mava/configs/default/ff_hasac.yaml | 11 + mava/configs/default/ff_ippo.yaml | 2 +- mava/configs/default/ff_isac.yaml | 2 +- mava/configs/default/ff_mappo.yaml | 2 +- mava/configs/default/ff_masac.yaml | 2 +- mava/configs/default/ff_sable.yaml | 11 + mava/configs/default/mat.yaml | 11 + mava/configs/default/rec_qmix.yaml | 11 + mava/configs/default/rec_sable.yaml | 11 + mava/configs/env/scenario/large-4ag-hard.yaml | 14 + mava/configs/env/scenario/large-4ag.yaml | 14 + mava/configs/env/scenario/large-8ag-hard.yaml | 14 + mava/configs/env/scenario/large-8ag.yaml | 14 + .../configs/env/scenario/medium-4ag-hard.yaml | 14 + mava/configs/env/scenario/medium-4ag.yaml | 14 + mava/configs/env/scenario/medium-6ag.yaml | 14 + mava/configs/env/scenario/small-4ag-hard.yaml | 14 + mava/configs/env/scenario/tiny-2ag-hard.yaml | 14 + mava/configs/env/scenario/tiny-4ag-hard.yaml | 14 + .../configs/env/scenario/xlarge-4ag-hard.yaml | 14 + mava/configs/env/scenario/xlarge-4ag.yaml | 14 + mava/configs/env/vector-connector.yaml | 21 + mava/configs/network/cnn.yaml | 3 - mava/configs/network/continuous_mlp.yaml | 17 - mava/configs/network/ff_retention.yaml | 10 + mava/configs/network/mlp.yaml | 3 - mava/configs/network/qmix_rnn.yaml | 19 + mava/configs/network/rcnn.yaml | 3 - mava/configs/network/rec_retention.yaml | 16 + mava/configs/network/rnn.yaml | 3 - mava/configs/network/transformer.yaml | 6 + mava/configs/system/mat/mat.yaml | 25 + mava/configs/system/q_learning/rec_iql.yaml | 2 +- mava/configs/system/q_learning/rec_qmix.yaml | 35 + mava/configs/system/sable/ff_sable.yaml | 23 + mava/configs/system/sable/rec_sable.yaml | 23 + mava/configs/system/sac/ff_hasac.yaml | 40 + mava/evaluator.py | 6 +- mava/networks/__init__.py | 1 + mava/networks/attention.py | 77 ++ mava/networks/base.py | 71 ++ mava/networks/mat_network.py | 279 +++++++ mava/networks/retention.py | 
323 ++++++++ mava/networks/sable_network.py | 473 ++++++++++++ mava/networks/torsos.py | 35 +- .../utils/__init__.py} | 2 - mava/networks/utils/mat/__init__.py | 13 + mava/networks/utils/mat/decode.py | 161 ++++ mava/networks/utils/sable/__init__.py | 25 + mava/networks/utils/sable/decode.py | 145 ++++ mava/networks/utils/sable/encode.py | 84 ++ mava/networks/utils/sable/get_init_hstates.py | 43 ++ .../utils/sable/positional_encoding.py | 60 ++ mava/systems/mat/anakin/mat.py | 598 ++++++++++++++ mava/systems/mat/types.py | 51 ++ mava/systems/ppo/anakin/ff_ippo.py | 10 +- mava/systems/ppo/anakin/ff_mappo.py | 10 +- mava/systems/ppo/anakin/rec_ippo.py | 10 +- mava/systems/ppo/anakin/rec_mappo.py | 10 +- mava/systems/q_learning/anakin/rec_iql.py | 124 +-- mava/systems/q_learning/anakin/rec_qmix.py | 689 +++++++++++++++++ mava/systems/q_learning/types.py | 60 +- mava/systems/sable/__init__.py | 13 + mava/systems/sable/anakin/__init__.py | 13 + mava/systems/sable/anakin/ff_sable.py | 669 ++++++++++++++++ mava/systems/sable/anakin/rec_sable.py | 693 +++++++++++++++++ mava/systems/sable/types.py | 79 ++ mava/systems/sac/anakin/ff_hasac.py | 729 ++++++++++++++++++ mava/systems/sac/anakin/ff_isac.py | 39 +- mava/systems/sac/anakin/ff_masac.py | 45 +- mava/types.py | 7 +- mava/utils/checkpointing.py | 23 +- mava/utils/jax_utils.py | 36 +- mava/utils/logger.py | 4 +- mava/utils/make_env.py | 69 +- mava/utils/network_utils.py | 30 + mava/wrappers/__init__.py | 1 + mava/wrappers/gigastep.py | 6 +- mava/wrappers/jaxmarl.py | 4 + mava/wrappers/jumanji.py | 145 +++- pyproject.toml | 80 +- requirements/requirements.txt | 4 +- setup.py | 66 -- test/__init__.py | 13 + test/conftest.py | 61 +- test/integration_test.py | 50 +- test/utils.py | 39 + 97 files changed, 6587 insertions(+), 712 deletions(-) create mode 100644 .dockerignore create mode 100644 mava/configs/default/ff_hasac.yaml create mode 100644 mava/configs/default/ff_sable.yaml create mode 100644 mava/configs/default/mat.yaml create mode 100644 mava/configs/default/rec_qmix.yaml create mode 100644 mava/configs/default/rec_sable.yaml create mode 100644 mava/configs/env/scenario/large-4ag-hard.yaml create mode 100644 mava/configs/env/scenario/large-4ag.yaml create mode 100644 mava/configs/env/scenario/large-8ag-hard.yaml create mode 100644 mava/configs/env/scenario/large-8ag.yaml create mode 100644 mava/configs/env/scenario/medium-4ag-hard.yaml create mode 100644 mava/configs/env/scenario/medium-4ag.yaml create mode 100644 mava/configs/env/scenario/medium-6ag.yaml create mode 100644 mava/configs/env/scenario/small-4ag-hard.yaml create mode 100644 mava/configs/env/scenario/tiny-2ag-hard.yaml create mode 100644 mava/configs/env/scenario/tiny-4ag-hard.yaml create mode 100644 mava/configs/env/scenario/xlarge-4ag-hard.yaml create mode 100644 mava/configs/env/scenario/xlarge-4ag.yaml create mode 100644 mava/configs/env/vector-connector.yaml delete mode 100644 mava/configs/network/continuous_mlp.yaml create mode 100644 mava/configs/network/ff_retention.yaml create mode 100644 mava/configs/network/qmix_rnn.yaml create mode 100644 mava/configs/network/rec_retention.yaml create mode 100644 mava/configs/network/transformer.yaml create mode 100644 mava/configs/system/mat/mat.yaml create mode 100644 mava/configs/system/q_learning/rec_qmix.yaml create mode 100644 mava/configs/system/sable/ff_sable.yaml create mode 100644 mava/configs/system/sable/rec_sable.yaml create mode 100644 mava/configs/system/sac/ff_hasac.yaml create mode 100644 mava/networks/attention.py 
create mode 100644 mava/networks/mat_network.py create mode 100644 mava/networks/retention.py create mode 100644 mava/networks/sable_network.py rename mava/{version.py => networks/utils/__init__.py} (96%) create mode 100644 mava/networks/utils/mat/__init__.py create mode 100644 mava/networks/utils/mat/decode.py create mode 100644 mava/networks/utils/sable/__init__.py create mode 100644 mava/networks/utils/sable/decode.py create mode 100644 mava/networks/utils/sable/encode.py create mode 100644 mava/networks/utils/sable/get_init_hstates.py create mode 100644 mava/networks/utils/sable/positional_encoding.py create mode 100644 mava/systems/mat/anakin/mat.py create mode 100644 mava/systems/mat/types.py create mode 100644 mava/systems/q_learning/anakin/rec_qmix.py create mode 100644 mava/systems/sable/__init__.py create mode 100644 mava/systems/sable/anakin/__init__.py create mode 100644 mava/systems/sable/anakin/ff_sable.py create mode 100644 mava/systems/sable/anakin/rec_sable.py create mode 100644 mava/systems/sable/types.py create mode 100644 mava/systems/sac/anakin/ff_hasac.py create mode 100644 mava/utils/network_utils.py delete mode 100644 setup.py create mode 100644 test/__init__.py create mode 100644 test/utils.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..b2de8b5cc --- /dev/null +++ b/.dockerignore @@ -0,0 +1,26 @@ +.dockerignore + +.DS_Store +.idea +.vscode + +.git +.github +.gitignore +.gitlab-ci.yml +.gitmodules + +.conda +.neptune +.pytest_cache +.mypy_cache +.ruff_cache + +.pre-commit-config.yaml +commitlint.config.js +LICENSE + +*.egg-info +docs/ +outputs/ +results/ diff --git a/.github/workflows/tests_linters.yaml b/.github/workflows/tests_linters.yaml index d9c4ffc96..440d9aa4f 100644 --- a/.github/workflows/tests_linters.yaml +++ b/.github/workflows/tests_linters.yaml @@ -4,26 +4,36 @@ on: [ pull_request ] jobs: tests-and-linters: - name: "Python ${{ matrix.python-version }} on ${{ matrix.os }}" - runs-on: "${{ matrix.os }}" - timeout-minutes: 10 + name: "Python ${{ matrix.python-version }} on ubuntu-latest" + runs-on: ubuntu-latest + timeout-minutes: 20 strategy: matrix: python-version: ["3.12", "3.11"] - os: [ubuntu-latest] steps: - name: Checkout mava - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v3 + with: + version: "0.4.26" + enable-cache: true + cache-dependency-glob: "requirements/requirements**.txt" # invalidate cache when requirements file changes + + - uses: actions/setup-python@v5 with: python-version: "${{ matrix.python-version }}" - - name: Upgrade pip - run: pip install --upgrade pip + - name: Install python dependencies 🔧 - run: pip install .[dev] + run: uv pip install .[dev] + env: + UV_SYSTEM_PYTHON: 1 + - name: Run linters 🖌️ run: pre-commit run --all-files --verbose + - name: Run tests 🧪 run: pytest -p no:warnings diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fb49feaa3..b848a324c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,11 @@ -default_stages: [ "commit", "commit-msg", "push" ] +default_stages: [ "pre-commit", "commit-msg", "pre-push" ] default_language_version: python: python3 repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.4.8 + rev: v0.7.1 hooks: # Run the linter. 
- id: ruff @@ -16,7 +16,7 @@ repos: types_or: [ python, pyi, jupyter ] - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: end-of-file-fixer name: "End of file fixer" @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook - rev: v9.16.0 + rev: v9.18.0 hooks: - id: commitlint name: "Commit linter" diff --git a/Dockerfile b/Dockerfile index baa8c1e4c..e7790d345 100755 --- a/Dockerfile +++ b/Dockerfile @@ -1,45 +1,23 @@ -FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04 +# Stage 1: Build environment +FROM python:3.12-slim AS core -# Ensure no installs try to launch interactive screen -ARG DEBIAN_FRONTEND=noninteractive +# Add git +RUN apt-get update && apt-get install -y git build-essential pkg-config libhdf5-dev -# Update packages and install python3.9 and other dependencies -RUN apt-get update -y && \ - apt-get install -y software-properties-common git && \ - add-apt-repository -y ppa:deadsnakes/ppa && \ - apt-get install -y python3.12 python3.12-dev python3-pip python3.12-venv && \ - update-alternatives --install /usr/bin/python python /usr/bin/python3.12 10 && \ - python -m venv mava && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* +# Add uv and use the system python (no need to make venv) +USER root +COPY --from=ghcr.io/astral-sh/uv:0.4.20 /uv /bin/uv +ENV UV_SYSTEM_PYTHON=1 -# Setup virtual env and path -ENV VIRTUAL_ENV /mava -ENV PATH /mava/bin:$PATH +WORKDIR /home/app/mava -# Location of mava folder -ARG folder=/home/app/mava - -# Set working directory -WORKDIR ${folder} - -# Copy all code needed to install dependencies -COPY ./requirements ./requirements -COPY setup.py . -COPY README.md . -COPY mava/version.py mava/version.py +COPY . . -RUN echo "Installing requirements..." -RUN pip install --quiet --upgrade pip setuptools wheel && \ - pip install -e . +RUN uv pip install -e . -# Need to use specific cuda versions for jax -ARG USE_CUDA=true +ARG USE_CUDA=false RUN if [ "$USE_CUDA" = true ] ; \ - then pip install "jax[cuda12]==0.4.30" ; \ + then uv pip install jax[cuda12]==0.4.30 ; \ fi -# Copy all code -COPY . . - EXPOSE 6006 diff --git a/Makefile b/Makefile index 3f005d6cd..27b8f6f06 100755 --- a/Makefile +++ b/Makefile @@ -1,19 +1,8 @@ -# Check if GPU is available -NVCC_RESULT := $(shell which nvcc 2> NULL) -NVCC_TEST := $(notdir $(NVCC_RESULT)) -ifeq ($(NVCC_TEST),nvcc) -GPUS=--gpus all -else -GPUS= -endif - -# For Windows use CURDIR -ifeq ($(PWD),) -PWD := $(CURDIR) -endif +# Check if GPU is available - if `nvidia-smi` works then use GPUs +GPUS := $(shell command -v nvidia-smi > /dev/null && nvidia-smi > /dev/null 2>&1 && echo "--gpus all" || echo "") # Set flag for docker run command -BASE_FLAGS=-it --rm -v ${PWD}:/home/app/mava -w /home/app/mava +BASE_FLAGS=-it --rm RUN_FLAGS=$(GPUS) $(BASE_FLAGS) DOCKER_IMAGE_NAME = mava diff --git a/README.md b/README.md index b9b823edc..4285080d7 100644 --- a/README.md +++ b/README.md @@ -159,7 +159,7 @@ cd mava pip install -e . ``` -We have tested `Mava` on Python 3.11 and 3.12, but earlier versions may also work. Note that because the installation of JAX differs depending on your hardware accelerator, +We have tested `Mava` on Python 3.11 and 3.12, but earlier versions may also work. Specifically, we use Python 3.10 for the Quickstart notebook on Google Colab since Colab uses Python 3.10 by default. 
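The README hunk goes on below to advise installing the JAX build that matches your accelerator. A quick way to confirm that the installed JAX actually targets the intended hardware is a two-line check (illustrative only, not part of this patch):

```python
# Sanity check after installing JAX: confirm which backend and devices were picked up.
import jax

print(jax.default_backend())  # e.g. "cpu", "gpu" or "tpu"
print(jax.devices())          # the devices Mava's jit/pmap code will run on
```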
Note that because the installation of JAX differs depending on your hardware accelerator, we advise users to explicitly install the correct JAX version (see the [official installation guide](https://github.com/google/jax#installation)). For more in-depth installation guides including Docker builds and virtual environments, please see our [detailed installation guide](docs/DETAILED_INSTALL.md). ## Quickstart ⚡ diff --git a/examples/Quickstart.ipynb b/examples/Quickstart.ipynb index bcc11b58a..7febf6140 100644 --- a/examples/Quickstart.ipynb +++ b/examples/Quickstart.ipynb @@ -25,9 +25,12 @@ "id": "a99IjmO51uP2" }, "source": [ - "### This notebook offers a simple introduction to [Mava](https://github.com/instadeepai/Mava) by showing how to build and train a multi-agent PPO (MAPPO) system on the RobotWarehouse environment from [Jumanji](https://github.com/instadeepai/jumanji). Mava follows the design philosophy of [CleanRL](https://github.com/vwxyzjn/cleanrl) allowing for easy code readability and reuse, and is built on top of code from [PureJaxRL](https://github.com/luchris429/purejaxrl), extending it to provide end-to-end JAX-based multi-agent algorithms.\n", + "### This notebook offers a simple introduction to [Mava](https://github.com/instadeepai/Mava) by showing how to build and train a multi-agent PPO (MAPPO) system on the RobotWarehouse environment from [Jumanji](https://github.com/instadeepai/jumanji). Mava follows the design philosophy of [CleanRL](https://github.com/vwxyzjn/cleanrl) allowing for easy code readability and reuse, and is built on top of code from [PureJaxRL](https://github.com/luchris429/purejaxrl), extending it to provide end-to-end JAX-based multi-agent algorithms. \n", "\n", - "\"Open\n" + "> #### Note\n", + "> This notebook is meant as an introduction to how systems are created in Mava and in general we highly recommend using the python files inside `mava/systems/` as these are the most performant and up to date.\n", + "\n", + "\"Open\n" ] }, { @@ -45,72 +48,27 @@ "cell_type": "code", "execution_count": null, "metadata": { - "cellView": "form", - "id": "5l-eEkH-2f0D" + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "5l-eEkH-2f0D", + "outputId": "0aa8544b-7697-46c3-e605-a5cbf92eae0b" }, "outputs": [], "source": [ "%%capture\n", - "# @title Install Mava\n", - "! pip install git+https://github.com/instadeepai/mava.git@develop\n", - "! pip install \"jax[cuda12_pip]\" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IMBnurbl-9Ez" - }, - "source": [ - "Restarting the runtime is necessary after reinstalling JAX in Colab to ensure that the changes take effect and that the runtime environment is properly configured for the updated JAX version." + "# @title Install required packages\n", + "! 
pip install git+https://github.com/instadeepai/mava.git@develop" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "cellView": "form", - "id": "2pMV4rGjTQAw" - }, - "outputs": [], - "source": [ - "# @title Restart Google Colab runtime\n", - "import os\n", - "\n", - "os.kill(os.getpid(), 9)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "cellView": "form", "id": "FjXA8JyI1_YW" }, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "[(0.00392156862745098, 0.45098039215686275, 0.6980392156862745),\n", - " (0.8705882352941177, 0.5607843137254902, 0.0196078431372549),\n", - " (0.00784313725490196, 0.6196078431372549, 0.45098039215686275),\n", - " (0.8352941176470589, 0.3686274509803922, 0.0),\n", - " (0.8, 0.47058823529411764, 0.7372549019607844),\n", - " (0.792156862745098, 0.5686274509803921, 0.3803921568627451),\n", - " (0.984313725490196, 0.6862745098039216, 0.8941176470588236),\n", - " (0.5803921568627451, 0.5803921568627451, 0.5803921568627451),\n", - " (0.9254901960784314, 0.8823529411764706, 0.2),\n", - " (0.33725490196078434, 0.7058823529411765, 0.9137254901960784)]" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# @title Import required packages.\n", "\n", @@ -140,8 +98,8 @@ "from optax._src.base import OptState\n", "\n", "# Mava Helpful functions and types\n", - "from mava.distributions import IdentityTransformation\n", - "from mava.evaluator import get_eval_fn\n", + "from mava.networks.distributions import IdentityTransformation\n", + "from mava.evaluator import get_eval_fn, make_ff_eval_act_fn\n", "from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition\n", "from mava.types import (\n", " ActorApply,\n", @@ -159,17 +117,23 @@ "from mava.utils.training import make_learning_rate\n", "from mava.wrappers import (\n", " AgentIDWrapper,\n", - " AutoResetWrapper,\n", - " RecordEpisodeMetrics,\n", " RwareWrapper,\n", ")\n", + "from jumanji.environments.routing.robot_warehouse.generator import (\n", + " RandomGenerator as RwareRandomGenerator,\n", + ")\n", + "from mava.utils import make_env as environments\n", "\n", "%matplotlib inline\n", "import seaborn as sns\n", "\n", "sns.set()\n", "sns.set_style(\"white\")\n", - "sns.color_palette(\"colorblind\")" + "sns.color_palette(\"colorblind\")\n", + "\n", + "import warnings\n", + "\n", + "warnings.filterwarnings(\"ignore\")" ] }, { @@ -193,18 +157,16 @@ "Initially, we start by constructing the Actor and Critic networks using components from the Flax library.\n", "\n", "* The `Actor()` network takes an observation as input and produces logits representing the probabilities of different actions. The shapes within the network are determined dynamically based on the number of agents, the observation, and the batch size.\n", - "* The `Critic()` network takes the global state as input and produces the estimated value of the state. Similar to the Actor network, the shapes within the network are handled implicitly by Flax." + "* The `Critic()` network takes the global state as input and produces the estimated value of the state. Similar to the Actor network, the shapes within the network are handled implicitly by Flax.\n", + "\n", + "Note: that in Mava we have utility functions that will construct this network for you through the config, we explicitly create the networks here as an example." 
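For readers of the patch, here is a minimal sketch of the pattern that markdown cell describes; the layer sizes and names are illustrative, not the notebook's exact `Actor`. The point being made is that Flax resolves all parameter shapes lazily from the first (dummy) input it sees:

```python
import flax.linen as nn
import jax
import jax.numpy as jnp


class Actor(nn.Module):
    """Toy policy network: observation -> action logits."""

    num_actions: int

    @nn.compact
    def __call__(self, obs: jnp.ndarray) -> jnp.ndarray:
        x = nn.relu(nn.Dense(128)(obs))
        x = nn.relu(nn.Dense(128)(x))
        return nn.Dense(self.num_actions)(x)  # logits over discrete actions


# Parameter shapes are inferred from this dummy batch of observations.
params = Actor(num_actions=5).init(jax.random.PRNGKey(0), jnp.zeros((4, 10)))
```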
] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Sss6opmC6lmp", - "outputId": "7eb3833d-d44b-4218-c9f4-aae8aa2e447c" + "id": "Sss6opmC6lmp" }, "outputs": [], "source": [ @@ -255,12 +217,12 @@ }, "source": [ "### Learner Function\n", - "The `get_learner_fn` function returns a function which produces an `ExperimentOutput`, encapsulating the updated learner state, episode information, and loss metrics." + "The `get_learner_fn` returns the entire act-learn loop. `_env_step` is the acting, while `_update_epoch` does the learning. We do this in a single function so it is easy to `jit`/`vmap`/`pmap`, so that all acting and learning can be done on an accelerator." ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "metadata": { "id": "4VVjKmgW64Ct" }, @@ -570,12 +532,12 @@ }, "source": [ "### Learner Setup\n", - "The learner setup initialises components for training: the learner function, actor and critic networks and optimizers, environment, and states. It creates a function for learning, employs parallel processing over the cores for efficiency, and sets up initial states." + "The learner setup initialises components for training: the learner function (above), actor and critic networks and optimizers and environment states. It also `pmap`s the learner function so that it is able to be run across multiple TPU cores." ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": { "id": "eWjNSGvZ7ALw" }, @@ -682,27 +644,28 @@ }, "source": [ "### Rendering\n", - "The `render_one_episode` function simulates and visualises one episode from rolling out a trained MAPPO model that will be passed to the function using `actors_params`." + "The `render_one_episode` function simulates and visualises one episode from rolling out a trained MAPPO model." ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": { "id": "DU7OVSm6HM6q" }, "outputs": [], "source": [ "def render_one_episode(config, params, max_steps=100) -> None:\n", - " \"\"\"Rollout episdoes of a trained MAPPO.\"\"\"\n", + " \"\"\"Rollout episodes of a trained MAPPO policy.\"\"\"\n", " # Create envs\n", - " env = jumanji.make(config[\"env\"][\"env_name\"])\n", - " env = RwareWrapper(env, add_global_state=True)\n", - " # Add agent id to observation.\n", - " if config[\"system\"][\"add_agent_id\"]:\n", - " env = AgentIDWrapper(env=env)\n", - "\n", - " # Create actor networks (We only care about the policy during the rendering)\n", + " env_config = {**config.env.kwargs, **config.env.scenario.env_kwargs}\n", + " generator = RwareRandomGenerator(**config.env.scenario.task_config)\n", + " env = jumanji.make(config.env.scenario.name, generator=generator, **env_config)\n", + " env = RwareWrapper(env)\n", + " if config.system.add_agent_id:\n", + " env = AgentIDWrapper(env)\n", + "\n", + " # Create actor networks (We only care about the policy during rendering)\n", " actor_network = Actor(env.action_dim)\n", " apply_fn = actor_network.apply\n", "\n", @@ -747,26 +710,25 @@ }, "source": [ "### Logging:\n", - "The `plot_performance` function visualises the performance of the algorithm, this plot will be refreshed each time evaluation interval happens!" + "The `plot_performance` function visualises the performance of the algorithm. This plot will be refreshed each time evaluation interval happens!" 
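A toy illustration of the `jit`/`vmap`/`pmap` point made for `get_learner_fn` above: because acting and learning live in one pure function, many update steps can be fused with `lax.scan` and replicated across devices with `pmap`. The state and update rule below are stand-ins, not Mava's actual learner.

```python
import jax
import jax.numpy as jnp


def learner_fn(state, _):
    # Stand-in for one rollout plus one gradient update (both jittable in Mava).
    new_state = state + 1.0
    return new_state, {"loss": jnp.mean(new_state)}


def run_updates(state):
    # Many act-learn steps fused into a single XLA program.
    return jax.lax.scan(learner_fn, state, None, length=100)


# One copy of the (toy) learner state per device; everything below runs on-device.
states = jnp.zeros((jax.device_count(), 8))
final_states, metrics = jax.pmap(run_updates)(states)
```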
] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": { "id": "OwkZqb8y8GYG" }, "outputs": [], "source": [ - "def plot_performance(episode_metrics, ep_returns, start_time):\n", + "def plot_performance(mean_episode_return, ep_returns, start_time):\n", " plt.figure(figsize=(8, 4))\n", " clear_output(wait=True)\n", "\n", " # Plot the data\n", - " ep_returns.append(episode_metrics[\"episode_return\"].mean())\n", + " ep_returns.append(mean_episode_return)\n", " plt.plot(\n", - " np.linspace(0, (time.time() - start_time) / 60.0, len(list(ep_returns))),\n", - " list(ep_returns),\n", + " np.linspace(0, (time.time() - start_time) / 60.0, len(list(ep_returns))), list(ep_returns)\n", " )\n", " plt.xlabel(\"Run Time [Minutes]\")\n", " plt.ylabel(\"Episode Return\")\n", @@ -823,7 +785,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": { "id": "wexJ0Slr8INC" }, @@ -853,11 +815,27 @@ " \"num_eval_episodes\": 32,\n", " \"num_evaluation\": 50,\n", " \"evaluation_greedy\": False,\n", + " \"num_absolute_metric_eval_episodes\": 32,\n", " },\n", " \"env\": {\n", - " \"env_name\": \"RobotWarehouse-v0\",\n", + " \"env_name\": \"RobotWarehouse\",\n", " \"eval_metric\": \"episode_return\",\n", + " \"implicit_agent_id\": False,\n", " \"log_win_rate\": False,\n", + " \"kwargs\": {\"time_limit\": 500},\n", + " \"scenario\": {\n", + " \"name\": \"RobotWarehouse-v0\",\n", + " \"task_name\": \"tiny-4ag-easy\",\n", + " \"task_config\": {\n", + " \"column_height\": 8,\n", + " \"shelf_rows\": 1,\n", + " \"shelf_columns\": 3,\n", + " \"num_agents\": 4,\n", + " \"sensor_range\": 1,\n", + " \"request_queue_size\": 8,\n", + " },\n", + " \"env_kwargs\": {},\n", + " },\n", " },\n", "}\n", "# Convert the Python dictionary to a DictConfig\n", @@ -870,7 +848,7 @@ "id": "sub4CAfrLHbM" }, "source": [ - "#### Define Training and Evaluation environments" + "#### Create the Training and Evaluation environments" ] }, { @@ -879,87 +857,18 @@ "id": "dwMHRotOLmdT" }, "source": [ - "We use a series of wrappers to configure the training and evaluation environments, each with distinct purposes, described as follows:\n", - "\n", - "`RwareWrapper`: A wrapper for training and evaluating the environment of a robotic warehouse using the Mava system.\n", - "\n", - "`GlobalStateWrapper`: This wrapper includes a global environment state to be used by the centralised critic. It's worth noting that since robotic warehouse does not have a global state, we create one by concatenating the observations of all agents.\n", - "\n", - "`AutoResetWrapper`: This wrapper automatically resets the environment after a completed episode. Once a terminal state is attained, the state, observation, and step type are reset in readiness for subsequent interactions.\n", - "\n", - "`RecordEpisodeMetrics`: This wrapper contributes to the logging process by capturing episode returns and lengths during the episode step invocation.\n", - "\n", - "`AgentIDWrapper`: This wrapper adds one-hot agent IDs to agent observations." + "We use Mava's utility functions to create our environments for us. These environments will have a seuqnece of wrappers applied to them that will add agent identifiers and will log any relevant metrics. Since MAPPO has a centralised critic, we will also need the environment to return the true underlying environment state along with the individual agent observations. This is why we pass in `add_global_state=True`. 
FOr more information on all the wrappers that are applied, please see [here](https://github.com/instadeepai/Mava/blob/8b758133056e86303ab1acbe5aa2ade02e0f6e70/mava/utils/make_env.py#L86)." ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 11, "metadata": { "id": "lCqZohi0vKSR" }, "outputs": [], "source": [ - "# Set up a Jumanji environment for training.\n", - "env = jumanji.make(config[\"env\"][\"env_name\"])\n", - "env = RwareWrapper(env, add_global_state=True)\n", - "\n", - "# Set up a Jumanji environment for evaluation.\n", - "eval_env = jumanji.make(config[\"env\"][\"env_name\"])\n", - "eval_env = RwareWrapper(eval_env, add_global_state=True)\n", - "\n", - "# Add agent id to observation.\n", - "if config[\"system\"][\"add_agent_id\"]:\n", - " env = AgentIDWrapper(env=env)\n", - " eval_env = AgentIDWrapper(env=eval_env)\n", - "\n", - "# The eval env runs for one episode so it doesn't need to be auto reset\n", - "env = AutoResetWrapper(env)\n", - "\n", - "env = RecordEpisodeMetrics(env)\n", - "eval_env = RecordEpisodeMetrics(eval_env)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "PrFx-V-DNUkN" - }, - "source": [ - "#### The Learner and Evaluator Setup\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": { - "id": "gp-FLgLSNg29" - }, - "outputs": [], - "source": [ - "# PRNG keys.\n", - "key, key_e, actor_net_key, critic_net_key = jax.random.split(\n", - " jax.random.PRNGKey(config.system.seed), num=4\n", - ")\n", - "\n", - "# Setup learner.\n", - "learn, actor_network, learner_state = learner_setup(\n", - " env, (key, actor_net_key, critic_net_key), config\n", - ")\n", - "\n", - "\n", - "# Setup evaluator.\n", - "# The evaluator needs a function that given params and an observation returns an action\n", - "def eval_act_fn(params: FrozenDict, timestep, key, actor_state):\n", - " del actor_state\n", - " pi = actor_network.apply(params, timestep.observation)\n", - " action = pi.mode() if config.arch.evaluation_greedy else pi.sample(seed=key)\n", - " return action, {}\n", - "\n", - "\n", - "# Pass the above function, the environment and the config to create the evaluator function\n", - "evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False)" + "env, eval_env = environments.make(config, add_global_state=True)" ] }, { @@ -1003,27 +912,65 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 12, "metadata": { "id": "XeqzRKVPxP2F" }, "outputs": [], "source": [ - "# Calculate total timesteps.\n", - "n_devices = len(jax.devices())\n", - "config[\"system\"][\"num_updates_per_eval\"] = (\n", - " config[\"system\"][\"num_updates\"] // config[\"arch\"][\"num_evaluation\"]\n", + "def compute_total_timesteps(config: DictConfig):\n", + " # Calculate total timesteps.\n", + " n_devices = len(jax.devices())\n", + " config[\"system\"][\"num_updates_per_eval\"] = (\n", + " config[\"system\"][\"num_updates\"] // config[\"arch\"][\"num_evaluation\"]\n", + " )\n", + " steps_per_rollout = (\n", + " n_devices\n", + " * config[\"system\"][\"num_updates_per_eval\"]\n", + " * config[\"system\"][\"rollout_length\"]\n", + " * config[\"system\"][\"update_batch_size\"]\n", + " * config[\"arch\"][\"num_envs\"]\n", + " )\n", + "\n", + " return steps_per_rollout, config" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PrFx-V-DNUkN" + }, + "source": [ + "#### The Learner and Evaluator Setup\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": 
"gp-FLgLSNg29" + }, + "outputs": [], + "source": [ + "# PRNG keys.\n", + "key, key_e, actor_net_key, critic_net_key = jax.random.split(\n", + " jax.random.PRNGKey(config.system.seed), num=4\n", ")\n", - "steps_per_rollout = (\n", - " n_devices\n", - " * config[\"system\"][\"num_updates_per_eval\"]\n", - " * config[\"system\"][\"rollout_length\"]\n", - " * config[\"system\"][\"update_batch_size\"]\n", - " * config[\"arch\"][\"num_envs\"]\n", + "\n", + "# Setup learner.\n", + "learn, actor_network, learner_state = learner_setup(\n", + " env, (key, actor_net_key, critic_net_key), config\n", ")\n", "\n", - "# Run experiment for a total number of evaluations.\n", - "ep_returns = []" + "eval_act_fn = make_ff_eval_act_fn(actor_network.apply, config)\n", + "\n", + "# Setup evaluator.\n", + "evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False)\n", + "absolute_metric_evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=True)\n", + "\n", + "# Add total timesteps to the config and compute environment steps per rollout.\n", + "steps_per_rollout, config = compute_total_timesteps(config)" ] }, { @@ -1050,36 +997,29 @@ "id": "SOMJZaDGbx8P" }, "source": [ - "Now that the code has been compiled using JAX, its execution will benefit from optimised performance. We will proceed to train the MAPPO algorithm on the `small-4ag-easy` scenario from RobotWarehouse. The experiment follows a cyclic pattern, transitioning from training to evaluation and back to training.\n", + "Now that the code has been compiled using JAX notice how fast we can run a simple experiment. We will train the MAPPO algorithm on the `small-4ag-easy` scenario from RobotWarehouse. The training follows a cyclic pattern, transitioning from training to evaluation and back to training.\n", "\n", - "The training phase consists of performing 400 updates. Each update utilizes 512 parallel environments, with a rollout of 128 steps per environment and a batch of two vectorised full gradient update steps are performend. This comprehensive process results in over 50 million timesteps utilised for training." + "The training phase consists of performing 400 updates. Each update utilizes 512 parallel environments, with a rollout of 128 steps per environment and a batch of two vectorised full gradient update steps are performend. This results in over 50 million timesteps available for training." 
] }, { "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAArgAAAGSCAYAAAAIH7LiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/xnp5ZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB7yUlEQVR4nO3deVhUZfsH8O/MsO+LgLIpoCzKjoi4JmrumqVp5pKZqWmlb71l9Wb7z+qtt9KsTHM3l3JJDcktN1xxARdwAdkFZN+3mfP7Y2RyBBSGgYHh+7kuLp1zzpxzz4Byz3Pu535EgiAIICIiIiLSEmJNB0BEREREpE5McImIiIhIqzDBJSIiIiKtwgSXiIiIiLQKE1wiIiIi0ipMcImIiIhIqzDBJSIiIiKtwgSXiIiIiLQKE1wiIiIi0ipMcIkIy5cvh4eHB3Jzc5v9WmfPnoWHhwfOnj3b7NdqTTw8PPDxxx9rOoxWJSwsDIsXL27wsXPmzGnmiIhIWzDBJWpjdu7cCQ8PD8VX9+7d0b9/fyxevBiZmZmaDk9h8+bN2LlzZ7OdPyYmBh4eHli3bl2tffPmzYOHhwd27NhRa9/zzz+P/v37N1tcpLrbt29j+fLlSE1NbdbrJCcnw8fHBx4eHrhy5Uqjnnvs2DF4eHigX79+kMlkzRSh6vbu3Vvnvwmi9oYJLlEb9dprr+HLL7/ERx99hAEDBmDPnj2YOnUqKioqNB0aAGDLli3YtWtXre3BwcGIiYlBcHBwk87fvXt3GBoa4sKFC7X2Xbp0CTo6Orh48aLS9srKSly5cgWBgYFNujapR0REBD755BPF49u3b+P7779HWlpas173//7v/6Cjo6PSc/fs2QMHBwfcu3cPZ86cUXNkTbdv3z5s2LBB02EQaRwTXKI2asCAARg3bhwmTpyIzz77DC+++CKSk5Nx+PBhTYf2SGKxGPr6+hCLm/bfj46ODnx9fWslsQkJCcjLy8Pw4cNrJb/Xrl1DRUUFgoKCmnRtACgrK2vyOdo7PT096Orqtug1T5w4gZMnT2LGjBmNfm5paSmOHDmCmTNnonv37ti7d28zREhE6sAEl0hL9OzZEwCQkpKitP306dOYMmUK/P390bNnT8ybNw/x8fF1niMvLw+vv/46AgMDERISgk8//bTWiHB1dTVWrFiBIUOGwNvbG2FhYfjf//6HyspKxTFhYWG4desWzp07pyilmDZtGoD6a3Cjo6Mxe/ZsBAcHw9/fH2PGjMH69esf+ZqDgoKQnZ2NpKQkxbaLFy/CxMQEkyZNwp07d5TqimuS4ZoE99ChQ3j55ZfRr18/eHt7Y8iQIVixYgWkUqnSdaZNm4bRo0fj6tWreP755+Hn54f//e9/AOSjwsuWLcPQoUPh7e2NgQMH4ssvv1R6Px506NAhjB49Gt7e3hg1ahSOHz9e65jr16/jpZdeQmBgIAICAjBjxgxcvnxZ6ZiauumH1ZSwPHib/8qVK5g1axZCQkLg6+uLsLAwvPPOO0rPk8lkWLduHUaNGgUfHx/06dMHS5YsQUFBQZ2vo8bhw4fh4eGBuLg4xba//voLHh4eWLBggdKxI0aMwMKFCxWPH6zB3blzJ15//XUAwPTp0xU/Nw//nERFRWHChAnw8fHB4MGDsXv37kfG96Cqqip89tlnmD59OpydnRv8vBoHDx5EeXk5hg8fjpEjR+LAgQN13jEpLy/Hp59+ipCQEAQEBGDu3LnIzMyEh4cHli9frnRsZmYm3nnnHfTp00fxM/H7778rHVPzbyY8PBw//vgjBgwYAB8fH8yYMUPpZ3/atGk4evQo0tLSFO9fWFiYYv/GjRsxatQo+Pn5ITg4GE8//TSTdNJaqt2jIaJWp+a2rpmZmWLbqVOnMHv2bDg6OmLBggUoLy/Hpk2b8Nxzz2Hnzp1wdHRUOsfChQvh4OCAN954A5cvX8bGjRtRWFiIL7/8UnHMf/7zH+zatQvDhg3DzJkzERMTg5UrVyI+Ph4rVqwAALz77rv45JNPYGRkhLlz5wIAOnToUG/skZGRmDNnDmxtbTF9+nR06NAB8fHxOHr06CNH2moS1QsXLqBz584A5Emsv78//Pz8oKuri0uXLmHw4MGKfcbGxvD09AQA7Nq1C0ZGRpg5cyaMjIxw5swZLFu2DMXFxXj77beVrpWfn4/Zs2dj1KhRGDt2LKytrSGTyTBv3jxcuHABzz77LNzc3HDz5k2sX78eiYmJ+OGHH5TOceHCBRw4cABTpkyBsbExNm7ciNdeew1///03LC0tAQC3bt3C888/D2NjY7z00kvQ0dHBtm3bMG3aNGzatAl+fn71vh91ycnJwaxZs2BpaYmXX34ZZmZmSE1NxcGDB5WOW7JkCXbt2oWnn34a06ZNQ2pqKjZv3ozr169jy5Yt9Y60BgUFQSQSISoqSvG+RkVFQSwWK42g5+bmIiEhAVOnTq3zPMHBwZg2bRo2btyIuXPnwtXVFQDg5uamOCYpKQmvv/46JkyYgPHjx2PHjh1YvHgxevTogW7duj32vVi/fj0KCwvxyiuv4MCBA489/mF79+5FSEgIbGxsMGrUKHz99dc4cuQIRowYoXTc4sWLsX//fowbNw5+fn44f/48Xn755Vrny87OxrPPPguRSITnn38eVlZWOH78ON577z0UFxfjhRdeUDp+1apVEIlEePHFF1FcXIzVq1fjzTffxG+//QYAmDt3LoqKipCRkaH4AGNsbAwA2L59Oz799FMMGzYM06dPR0VFBW7cuIHo6GiMGTOm0e8FUasnEFGbsmPHDsHd3V04deqUkJOTI9y9e1eIiIgQevfuLXh7ewt3795VHDtu3DghNDRUyMvLU2yLjY0VPD09hbfeekuxbdmyZYK7u7swd+5cpWt9+OGHgru7uxAbG6t4rru7u/Dee+8pHff5558L7u7uwunTpxXbRo0aJUydOrVW/GfOnBHc3d2FM2fOCIIgCNXV1UJYWJgwaNAgoaCgQOlYmUz2yPeiqKhI8PLyEt59913FtmHDhgnLly8XBEEQJkyYIHzxxReKfb179xZmzpypeFxWVlbrnO+//77g5+cnVFRUKLZNnTpVcHd3F7Zs2aJ07O7duwVPT0/h/PnzStu3bNkiuLu7CxcuXFBsc3d3F3r06CEkJSUpttW8nxs3blRse+WVV4QePXoIycnJim2ZmZlCQECA8Pzzzyu21XzPHlbz85GSkiIIgiAcPHhQcHd3F2JiYmodW+P8+fOCu7u7sGfPHqXtx48fr3P7w0aNGiW8/vrrisfjx48XXnvtNcHd3V24ffu2IAiCcODAAaWfJUEQhEGDBglvv/224vH+/fuVfjYeNGjQIMHd3V3pvc7JyRG8vb2Fzz///JHxCYIgZGVlCQEBAcLWrVsFQfjnfXrU+/Kg7OxsoXv37sL27dsV2yZNmiTMm
zdP6birV68K7u7uwmeffaa0ffHixYK7u7uwbNkyxbZ3331X6Nu3r5Cbm6t07KJFi4SgoCDFz2fNv5kRI0Yo/VyuX79ecHd3F27cuKHY9vLLLwuDBg2qFf+8efOEUaNGNei1EmkDligQtVEvvPACQkNDMXDgQLz22mswNDTEjz/+iI4dOwIAsrKyEBsbi/Hjx8PCwkLxPE9PT/Tp0wfHjh2rdc7nn39e6XHNaFvNbfSa58ycOVPpuBdffFFpf2Ncv34dqampmD59utLoMwCIRKJHPtfExAQeHh6KkcLc3FzcuXNHMYksMDBQUZZQU67w4AQzAwMDxd+Li4uRm5uLnj17oqysDAkJCUrX0tPTw9NPP620LSIiAm5ubnB1dUVubq7iq3fv3gBQ6/Z6nz59lG6Ne3p6wsTERFFWIpVKERkZiSFDhsDJyUlxnK2tLUaPHo0LFy6guLj4ke/Jw0xNTQEAR48eRVVVVZ3HREREwNTUFH379lV6HT169ICRkdFjW7oFBQUhKioKgPx9jIuLw6RJk2Bpaan43kRFRcHMzAzu7u6Niv9BXbt2VZTiAICVlRVcXFxqleXU5auvvoKTkxMmTpyo0rX//PNPiEQiPPnkk4pto0ePxvHjx5XKOE6cOAEAmDJlitLzHx65FgQBBw4cQFhYGARBUHrf+/Xrh6KiIly7dk3pOU8//TT09PQUj+srS6qLmZkZMjIyEBMT08BXTNS2sUSBqI1asmQJXFxcUFRUhB07duD8+fNKv/zS09MBAC4uLrWe6+bmhpMnT6K0tBRGRkaK7TW3+Ws4OztDLBYr6jnT0tIgFotr1S/a2NjAzMxMpdnvNb+cVU18goKCsHHjRuTm5uLSpUuQSCSK2/gBAQH49ddfUVlZWav+FpCXA3z77bc4c+ZMrcSxqKhI6bGdnZ3S+wvIb5nHx8cjNDS0zthycnKUHnfq1KnWMebm5igsLAQgT9DLysrq/Z7JZDLcvXu3Qbfja/Tq1QvDhg3D999/j3Xr1qFXr14YMmQIxowZo3g9SUlJKCoqavDreFjPnj2xdetWJCUlITk5GSKRSFHzHRUVhWeffRZRUVEIDAxs0uTC+t6/x9UJX758GX/88QfWrVun8vX37NkDX19f5OfnIz8/HwDg5eWFqqoqREREYNKkSQDk/+7EYnGt8p+H/23l5uaisLAQ27Ztw7Zt2+q85sN9qe3t7ZUe13wgrPn5eZTZs2fj1KlTmDhxIjp37oy+ffti9OjRaplwSdQaMcElaqN8fX3h4+MDABgyZAimTJmCN954AxEREYq6u6aqbwT1cSOrLSkwMBAbN27ExYsXcenSJbi7uytef0BAACorKxETE4MLFy5AR0cH/v7+AORJwdSpU2FiYoLXXnsNzs7O0NfXx7Vr1/DVV1/V6nH64GhvDZlMBnd391oTtmrUjKbXkEgkdR4nCEJjX3a934OHJ8iJRCIsW7YMly9fxt9//40TJ07g3Xffxdq1a7Ft2zYYGxtDJpPB2toaX331VZ3ntLKyemQsNUnS+fPnkZKSgu7du8PIyAg9e/bEhg0bUFJSgtjYWKUJZqqo7/17nP/+97/o2bMnHB0dFR/W8vLyAAD37t1Denp6reTxQYmJiYp+uQ+O4NbYu3evIsFtqJqfr7Fjx2L8+PF1HvPwJML6kvOG/Py4ubkhIiICR48exYkTJ3DgwAH8+uuvmD9/Pl577bVGxU7UFjDBJdICEokE//rXvzB9+nRs3rwZL7/8suIX9p07d2odn5CQAEtLS6XRW0A+kvfgrfGkpCTIZDLFaJSDgwNkMhmSkpKUJv9kZ2ejsLAQDg4Oim0NTYJrrnfz5k306dOnga/4Hw9ONLt8+bJSCYKdnR0cHBxw8eJFXLx4EV5eXjA0NAQAnDt3Dvn5+fj++++VevI2ZpEBZ2dnxMXFITQ0VC1Jv5WVFQwNDev9nonFYsUo5oOjdw+WdtSM3D/M398f/v7+WLRoEfbu3Ys333wT4eHhmDhxIpydnXH69GkEBgbWmcg/jr29Pezt7XHhwgWkpKQobp337NkTS5cuRUREBKRS6WN7HzfXB6e7d+8iLS1NMdnwQfPmzYOpqamixKIue/fuha6uLr788staSeaFCxewceNGRZJsb28PmUyG1NRUdOnSRXHcg90OAPn3uubDhSo/9/V51HtoZGSEkSNHYuTIkaisrMSrr76Kn376CXPmzIG+vr7aYiBqDViDS6QlalpArV+/HhUVFbC1tYWXlxd2796tdAvz5s2biIyMxMCBA2udY/PmzUqPN23aBEDecxeA4jkPt+9au3at0n4AMDQ0bNCt0x49esDR0REbNmyodXxDRqbs7Ozg6OiIM2fO4OrVqwgICFDaHxAQgMOHD+POnTtKt2NrEpUHr1FZWYlff/31sdesMWLECGRmZmL79u219pWXl6O0tLTB5wLkH1T69u2Lw4cPKyXa2dnZ2LdvH4KCgmBiYgIAijKR8+fPK44rLS2t1TaroKCg1vvo5eUFAIpWZiNGjIBUKq3V9QGQt4VryPcxKCgIZ86cQUxMjOJ99vLygrGxMX7++WcYGBigR48ejzxHzYePh8tDmurjjz/GihUrlL5q2ta9/fbb9Y5c19i7dy+CgoIwcuRIDB8+XOnrpZdeAiBfYAEA+vXrBwC1fo5q/i3VkEgkGDZsGP766y/cvHmz1jVVXTbb0NCwzvevZsS6hp6eHtzc3CAIQr212URtGUdwibTIrFmz8Prrr2Pnzp147rnn8NZbb2H27NmYNGkSJkyYoGgTZmpqWqtHKSAfvZw7dy769++Py5cvY8+ePRg9erSi/ZOnpyfGjx+Pbdu2obCwEMHBwbhy5Qp27dqFIUOGKCZXAfLEdcuWLfjhhx/QuXNnWFlZ1VnjKRaL8eGHH2LevHl46qmn8PTTT8PGxgYJCQm4ffs2fvnll8e+7qCgIPzxxx8AUGuVsoCAAEXy8WCCGxAQAHNzcyxevBjTpk2DSCTCH3/80ahygXHjxmH//v344IMPcPbsWQQGBkIqlSIhIQERERFYvXq1ooykoRYuXIhTp05hypQpmDJlCiQSCbZt24bKykr8+9//VhzXt29f2Nvb47333kNCQgIkEgl27NgBS0tLpVHcXbt2YcuWLRgyZAicnZ1RUlKC7du3w8TERPHBpVevXpg0aRJWrlyJ2NhY9O3bF7q6ukhMTERERATee+89DB8+/JFx9+zZE3v37oVIJFK8zxKJBAEBATh58iR69epVq4b5YV5eXpBIJFi1ahWKioqgp6eH3r17w9raulHv4cNqks4H1STtwcHBj/weRUdHIykpqdYEzBp2dnaKRR9efvlleHt7Y9iwYVi/fj3y8/MVbcISExMBKI+wvvHGGzh79iyeffZZTJw4EV27dkVBQQGuXbuG06dP49y5c41+rT169EB4eDiWLl0KHx8fGBkZISwsDLNmzUKHDh0QGBgIa2trJCQkYNOmTRg4cKDiQxORNmGCS6RFnnzySTg7O2PNmjV49tln0adPH6xevRrLli3DsmXLoKOjg+DgYPz7
3/9WKkWo8e233+K7777D119/DR0dHUydOhVvvfWW0jGffvopHB0dsWvXLhw6dAgdOnTAnDlzaiXM8+fPR3p6OlavXo2SkhL06tWr3klM/fv3x/r167FixQqsWbMGgiDAyckJzz77bINed02CW1OS8KAHE94HE1xLS0v89NNP+OKLL/Dtt9/CzMwMY8eORWhoKGbNmtWg64rFYqxYsQLr1q3DH3/8gYMHD8LQ0BCOjo6YNm1anZPFHqdbt27YvHkzvv76a6xcuRKCIMDX1xf//e9/lXrg6urq4vvvv8dHH32E7777DjY2NpgxYwbMzMyUaoJ79eqFK1euIDw8HNnZ2TA1NYWvr6+iq0CNjz/+GN7e3ti6dSu++eYbSCQSODg4YOzYsQ1a2rimLMHV1VXR07dm+8mTJ5W6H9THxsYGH330EVauXIn33nsPUqkUGzZsaHKC2xQ1CyE8uGDCw8LCwrB8+XLExcXB09MTX3zxBTp06IA///wTBw8eRJ8+ffDNN99g+PDhSkl+hw4d8Ntvv2HFihU4ePAgtmzZAgsLC3Tt2hVvvvmmSvFOmTIFsbGx2LlzJ9atWwcHBweEhYVh0qRJ2Lt3L9auXYvS0lJ07NgR06ZNwyuvvKLSdYhaO5GgyuwGIiIiarDY2Fg89dRT+O9//4uxY8dqOhwirccaXCIiIjUqLy+vtW39+vUQi8WPnWhHROrBEgUiIiI1Wr16Na5evYrevXtDIpHg+PHjOH78OCZNmlRnL18iUj+WKBAREalRZGQkvv/+e8THx6O0tBSdOnXCuHHjMHfuXOjocFyJqCUwwSUiIiIircIaXCIiIiLSKkxwiYiIiEirsBgI8j6NlZWVsLGx0XQoRERERFSHe/fuQU9P75FLa9dgggugoqICUqlU02EQERERUT2qq6sbvNokE1wAtra2AIDDhw9rOBIiIiIiqsvgwYMbfCxrcImIiIhIqzDBJSIiIiKtwgSXiIiIiLQKE1wiIiIi0ipMcImIiIhIqzDBJSIiIiKtwgSXiIiIiLQKE1wiIiIi0ipMcImIiIhIqzDBJSIiIiKtwgSXiIiIiLQKE1wiIiJqd05fScfS9eeQW1iu6VCoGehoOgAiIiKilrZu33WkZ5egskqGJbNCIBKJNB0SqRFHcImIiKhdycgpQXp2CQAgKjYTxy6laTgiUjcmuERERNSuXLyRBQDQkcjToJ93XUF+UYUmQyI1Y4JLRERE7crFOHmC++zgbujSyQxFpZX4efcVDUdF6sQEl4iIiNqNaqkMMbezAQDB3Tvi9UkBEItFOHE5DWeu3tVwdKQuTHCJiIio3YhLzEVZRTXMTfTg6mCOrk4WGD/QDQDw445oFJdVaThCUgcmuERERKSyxLuF+ON4PKqqpZoOpUFq6m8D3G0hFss7Jzw3zBMONsbILazAmj1XNRkeqQkTXCIiIlLZ8u2XsPqPq1j353VNh9IgigTXw1axTV9XglefDQAAHDyXjOib9zQSG6kPE1wiIiJSSXFpJW6l5AMA9hxPwJX7ta2tVX5RBeJTCwAAAR42Svt6uFpjVF8XAMDy3y6jvKK6xeMj9WGCS0RERCq5Ep8NQfjn8bdbL6K0vPXWsF6+KR+9dbU3h6WpQa3900d6wcbSEJm5pdgYEdvS4ZEaMcElIiIilUTfko/YhvV0gp2VEbLyyvDLnmsajqp+/5Qn2NS538hAFwsm+AMA9p5IQFxibkuFRmrGBJeIiIhUEnNbXqva27sjXp8cAJEIOHA2CeevZ2g4stpkMgGX7tfWBnra1ntcoKctwno6QRCAZdsvtZnJc6SMCS4RERE1Wk5BGVIyiyESAd5uHeDj1gFj+8vbbS3ffhmFJZUajlDZnfQC5BdVwEBPAq8u1o889qVx3rAw1UdKZjG2HbzZQhGSOjHBJSIiokarWSzBzcEcpkZ6AIBpI73gaGuCvKIKrNwZo8nwaqkpT/DtagNdnUenP6ZGepj7tC8A4Pcjt3AnvaDZ4yP1YoJLREREjRZzv/7Wr9s/9az6uhIsei4QYrEIxy+n4cTlNE2FV8ulG/fLE+qpv31YX1979PHtBKlMwHfbLkEqlTVneKRmTHCJiIioUQRBQPT9+lvfrsoJo7uzJSYO7gYA+HFHDPIKy1s8voeVVVQjNjEHABDwiPrbh80d7wsTQ13EpxZg17H45gqPmgETXCIiImqUuzkluJdXBh2JCN1drGrtnzTEA64O5igqrcTy3y5DeLCXmAZcuZ2NaqmAjtZGsO9g0uDnWZoZ4KVx3gCAX/+KQ2pWUXOF+Fil5VWIS8xFxOlE7DuZ0CpGlKuqpRr/3tZHR9MBPGj//v3Ys2cPrl27hsLCQnTu3BnTpk3DM888A5FIpDjut99+w+rVq5Geng4XFxcsWrQIgwYN0mDkREREtcWn5sPG0ghmxnqaDkWtasoTPDpbwUC/diqhqyPGoucCseibYzh/PROHzydjSK/OLR2mwoW4TADKq5c1VFhPJxy/lIaLN7KwctcVfDKnj7rDU1ItlSEtqxiJdwuRlFF4/88iZOWWKh0nEokUC1NoQmpWEd5afgIB7rb497SeGoujPq0qwV23bh0cHBywePFiWFpa4tSpU3j//feRkZGBBQsWAAD+/PNPvP/++5g7dy569+6N8PBwLFiwAJs3b4a/v79mXwARETXI7mPxOHE5Ff+e2hMdrY01HU6z2HsiAT/vvgI3R3P87/WBEItFj39SGxF9S16e4Ne1Q73HdOlkhueHe2L9n9fx8+6r8O1qA1sro5YKUUlN/W2QCgmuSCTCvGd8MWfpIVy+eQ/p94phb9PwUeDHEQQBEacTcTUhB0l3C5F2rxjV0rpHRa3MDGBqpIukjCIcOJuk0QR33b7rKCptvYt6tKoE98cff4SV1T+3OkJDQ5Gfn4+1a9filVdegVgsxrJlyzBq1CgsXLgQANC7d2/cvHkTK1aswKpVqzQUORERNVRVtRRbD8ShpLwaX2++gM/n94NEol0Vcyej07DqjysAgPjUApy+ehd9fe01HJV6yGQCrsTLR3B9uz16wtb4J7ri7NW7iEvKw3fbLuGTOX1aPNG/m12CuzklkIhF8HlEQv4oHa2NEehph6jYTBw8l4wZo7qrLb5z1zLwww7ljhNGBjro3NEMnTuZoXNH0/t/msHMWA+FJZWY8dFfSEgrQHxqPtwcLdQWS0Ndjc/G2WsZEItFmPykR4tfvyFa1f8oDya3Nby8vFBcXIzS0lKkpKQgMTERI0aMUDpm5MiROH36NCorW1fPPSIiqi36VjZKyqsBAHFJefj9yC0NR6ReV+Kz8fXmixAEwO7+iOWWv+Igk7XOWsXGSsooREFxJfT1JHB3tnzksRKxCIueC4S+ngQxt7MRfupOC0X5j5r2YF4uVjAy0FX5PEN7OQMAjkQlq7X+NfxUIgD5YhnvzwrBL+8NxdZPR+LLV/tj/gQ/jO7nCh+3DooyFzNjPYT6dAIAHDyXrLY4GkomE7Bmr3y1umEhneFkZ9riMTREq0pw63LhwgXY2dnBxMQECQkJAAAXF+UheTc
3N1RVVSElJUUTIRIRUSOcjJa3jnKyk9/m3XLgBm6l5GkyJLVJvFuIz9acRbVUhlCfTvjfwoEwNtBBUkYRTl+5q+nw1KJmed4ertaP7ScLAPY2Jph5f8Rz7b7rSLtX3KzxPezS/QQ3UIXyhAcFd+8IcxM95BZW4EJcljpCQ0ZOiSIBf3GMN3p17whbKyOleUd1qUm2j15MRUVVy660djI6DbdS8mGoL8Fzw1rn6C3QyhPcqKgohIeH48UXXwQAFBTIGy2bmZkpHVfzuGY/ERG1TlXVMpy5Kl/Gdd7TfujnZw+pTMDXmy+ivLJaw9E1zb28Mny46jRKyqvh1cUKbzwfBDNjPYwdIF/da8sB7RjFrVme169rw/rJAsCIPi7w69YBlVVSfLPlIqSPeR8EQUBllRTFpZUor1D956KqWqaIV5UJZg/S1RFjUJATAPlyxOoQcToRgDz57tSh4bXoft1sYGtpiJKyqhb94FRVLcX68FgAwNODusHS1KDFrt1YraoG90EZGRlYtGgRQkJCMH36dE2HQ0REanDldjZKyqpgYaqP7q7W6GJvhut3cpF2rxhr917DvGf8NB2iSopKK/HBqtPIKSiHk50J3p8VAn1dCQBg7AA37Dkej6SMIpy6ko5+fg4ajlZ1UqkMV+Pl/WR9uzW8nlUsFuG1SQF49au/cSMpD28tPw49XQkqq6SorJKhokp6/+9SVFTJ7refkj9XRyLGJ3NC4e3W+PrZuMRclFVIYW6iB1d780Y//2FPhnTG7mPxOB+bibzCcliaqZ7gVVVLFSUGw0O7NOq5YrEIQ4Kd8euBGzh4NglPBDqqHEdj/Bl5B1m5pbAy08dT9z+4tVatcgS3sLAQs2fPhoWFBZYvXw6xWB6mubn8h7OoqKjW8Q/uJyKi1ikyJh0AEOrTCRKxCKZGelg4OQCAvBYxKjZTk+GppKJKik/XnEVKZhGszQ3w4exQxdK1AGBiqItxilHcG216FPdWSj7KKqphaqTb6ITR1tIIs8f5AABuJufjanwObibnI/FuIe5mlyCnoBxFpVWorPonuQXkbbOWbbus0gh/ze3/AHdbtUxuc7IzhWdnS8hkAo5ENa0sMjLmLgpLKtHB3AC9uts1+vmDezlDJJIvmZyRU9KkWBqiuLQS2w7eBAA8P9yrzvZwrUmri668vBxz5sxBUVERtm3bBlPTf4qXXV1dAQAJCQmKv9c81tXVhZOTU4vHS0REDVMtlSlupz7YUSDAwxZj+rti74kELNt2CcvfHARzE31Nhdko8vKKC7h+JxfGBjr4cHYobC1rt8IaO8ANf5xIQHJGESKj09E/oG2O4tasXubTtYNKCeOQXs6wMNVHQXEF9HQl0NeVQE9XDD1dyQOP5dv0dSWolgp4/eu/cTenBL/+dQMvjunRqOvVJLiBjVi97HGGhnRGXFIeDp5LwtODuj62XrY+++9PuHuydxeVuojYWhrBv5sNLt28h0PnkjF1hJdKcTTU9sO3UFxWhc4dTTE42LlZr6UOrWoEt7q6GgsXLkRCQgJWr14NOzvlTzROTk7o0qULIiIilLaHh4cjNDQUenra1UibiEibXI3PRlFpJcyM9eDtaq20b8ao7nCyM0VeUQVW/B7daldHepAgCPh5VwxOX7kLHYkY780MQZdOZnUea2yoi6cG3h/FPXjjsTWorVXNAg8PL8/bGD297DA42Bn9/R3Qq0dH+LvboruLNbo6WsDJzhR2VkawNDWAkYEuzIz18MoEednKH8du42Zywycj5hWVIyFNPjcnwF19CW4/P3sY6EmQdq8E1+/kqnSOxLuFuH4nF2KxCE+GqJ4sDg2RL55x+Hxys/5MZeaWYu8J+UT/F0b3gKQN9HRuVQnuRx99hL///htz585FcXExLl++rPiqaQH26quvYt++fVi2bBnOnj2LDz74ADExMXjllVc0HD0RET1KZIx89DbUp1OtESt9XQnemBIIHYkIp6/cxeHzrb8rzm+HbyH8VCJEIuCN5wMf22N1TD9XGBvqIiWzCJH3O0m0JRVVUsQmyhM6v0bU3zZVcPeOeCLQETIBWLbtEqqqG9ai6/JN+Wizq4M5LEzVd0fAyEAX/f3lI/CqTjarGb0N6dER1uaGKsfS27sjTI30kF1QrugW0Rw2hseiWiqDX7cOCFLjaHhzalUJbmRkJADg888/x6RJk5S+srLk37jRo0fjk08+wb59+zBr1ixcvHgR33//PQICAjQZOhERPYJUKsPpK/L62/oWPHBztMCUYZ4AgJ93X2mRukJVHTqXjI375bPJXxrn3aCJY8aGuhh/fxR3axNHcWUyAX9fSMGJS2nIyClpkRHvuDu5qKqWwdrcAA5qXMmrIV4a5w1zEz0kZRTh98M3G/Sci3HqaQ9Wl6H3lx2OjElHaXnjVvMqq6jG3xdSAQAj+3RpUhy6OhIMCpJPMDt4Tj2dHR52KyUPxy6lQiQCZo7uoXJJRktrVTW4R44cadBxEydOxMSJE5s5GiIiUpdrd3JQUFwJUyPdR450Pj2oG6JiM3H9Ti7+9+tFLJ3fr9XdDo2KzcTy3y4DAJ4Z1BVj+zd8NvmY/q7YfSweKZnFOHk5DQNVmP0uCAJ+2hmD/fdbTAGAqZEeujlbwN3JEu7OFujmZKnWUUvgn/pb364dWjzJMTfRx5ynfPHlpihsP3wTfXzt0bmechBA/gHg0s3mS3A9u1jC0dYEqVnFOH4prVFdEI5dTEVZRTXsOxg3qdSjxtCQzthzIgFnr2Ygv6hCrd93QRCwdu91AMATgY4aWTVNVa1qBJeIiLRTZLR89La3dyfoPGJCTc3KV4b6OohNzMXOv1vXKmc3k/Pw+YbzkMkEPBHkiOkjG7dkq5GBLp56QvVRXEGQryK1/7S8NMLVwRw6EhGKSitxMS4LWw/ewMe/nMW0DyMw69MD+HzDeez8+xauxGejrAn9ZAEg+tb9/rePWZ63ufTzt0dIj46olgr4btulR753CekFKCiuhKG+BJ5daq+S2lQikUgxituYkVNBELD//splw0O7qKWzQ5dOZujmZAHp/VF9dTofm4kr8dnQ1RE3+yQ2dWOCS0TUTlRLZdgUEYsr8dktel2pTFB0T+hTT3nCgzpaG+Plp+TtpDZHxCE+Nb85w2uwlMwifLT6DCoqpQhwt8FrzwaolKCM6ecKUyNdpGYV48TlxtXibjlwA7uPxQMAFkz0x3f/egLb/28Uvn59AOaO90FYTyc42ZlAJAKy8soQGZ2Otfuu490fIjHr0wO4reJ7WVJWhdsp8ueqY9RRFSKRCPOe8YWxgQ5upeRjz/H4eo+tqUf17WrToNXWVBHW0wkSsQg3k/ORdLewQc+5mZyHhPQC6OqI1dqJoGay2cFzSWorV5FKZVi3T74k79j+rnV2B2nNmOASEbUTxy+lYtvBm/hqUxSk0oZN1FGH2Ds5yCuqgLGhboNH/wYHOyHUp5O8DdevF1p8OdKHZeWVYsnKUygsqURXJwssnhGscuJkZKCLpwZ2BQBsPdDwUdydf9/ClgM3AACzn/LGk/eTGl0dCd
ydLTGqnysWPReIH94ajC2fjMSnc/tgxqjuCPXpBEtTfRSVVuGnHTEq9eG9Gp8NmQDYdzCGjaXqk6KaytrcEDPHeAMANkXEIT277mV/Ff1vm6E8oYaFqT569egIADjQwFHc8Pujt/39HWBmrL7OTwP8HaCnK0FKZjFuJKln2euD55KRklkMUyM9TBzsrpZztiQmuERE7URUrPyXfm5hBS7dn2HeEmoWdwjp0bHBSaFIJML8CX6wNNVHSmYx1v95vc7jSsurkJJZhOib93AkKhm/Hb6JlTtjsG7fNRSWVKol/oLiCixZeRrZBeVwsDHBhy/1hpGBbpPOObqfC0yNdJF2rxgnLqU+9vg/I+9g7T75ezB9pNdj635rPkxMCOuGd1/ohW//9QQM9XVwIzlPpdvY0bflo/6aKk940JMhzoplf7/fHl0rYS8tr0Ls/fZdzVF/+6ChveSjsH9HpaKq+tEfwopKK3Hy/oj9iCZOLnuYsaEu+vnJ746oYxnhsopqbP4rDgAw+Ul3GBs27eddE1rVJDMiImoeUqlMMaoFyPtm9vRq/OpJjSWTCTh1vz1YX7/Hlyc8yNxEH69NCsBHq89g74kESKUylFVUI6egHDkF5cgtLH9kXempK3fx/oshcLIzrfeYxyktr8KHq04j7V4xOlgY4pM5fdSyCIWRgS7GP9EVG8JjsfXgDfT3d6i32f/h88n4aWcMAGDi4G4qjaZZmRlg8lB3rN13Hev/vI5Qn06NStJj7tffNmZ53uYiEomwYKI/Fnz1N67EZ+PA2SSlSV4xt7MhlQnoZG2MTh2MmzWWQA9bWJkZILewHGevZTyym8bh88morJbBxd4MHs6Wao9laC9nHIlKwcnoNMx+ygeGTVhpbNfR28gvqkCnDsYYEeqixihbDkdwiYjagbikPJSUVSlGUM9czUBxqXpGOB/lRlIecgvLYWSggwD3xo/+9fSyU7RSCj+ViL8vpCLmdjbS7hUrklsjAx042ZnAv5sNwno6YUJYN9haGeFudgn+vey4yv1BK6uk+GztOdxOLYCZsR4+fjlUrbfnR/V1gamRHtLuleDYpbprcU9Gp2HZtksA5B0YpjVhos+Y/m5wsDFGXlGFYsnVhsgrKkdSRhEAwMdN8wkuIK/Trnkv1uy9huz8MsW+f8oTmn+0WSIRY3CwfBXVg2eT6z3uwcllI/q4NEsXih6u1rDvYIyyCqlipFgVuYXl2Hn0NgBgxsjuzVbD3Nw4gktE1A5ciMsEAPTxsUdyZiHupBfi2KU0jOrbvKMzJ2Pkv2h79egIXR2JSueYOaYHDPR0UFkthbW5IazNDWBtbgArMwNYmxvWOVI1boAb/m/dOcQm5uLD1Wfw8lM+jXqtUqkM/90UhZjb2TDUl+DD2b2bNBJcF/korhs2hMdi28EbGBigPIp77noGvtp0ATIBeDKkM2aP825SYqSrI8ZL43zw0eoz2HMiHk/27tygfrY1q5e52pu3qiWUR/dzxYnLabiRlIcVv0djyawQiEQixQea5i5PqDG0V2f8dvgWLt3MQlZeaZ2TsWJuZSM9uwSG+jp4QoXWcA0hEokwpJczNoTH4uC5ZMXEs8b69a84VFRK4dnZEn18O6k5ypbTNtNyIiJqlKhYeYLb08tWMXv78Pn6R5zUQSYTcCr60Ys7NISBng5mjumBOeN9MSGsGwYFOcG3qw0cbU3rvQ1rYaqPz+b1QVhPJ8hk8r6xP+2MadDkOkEQsOL3aJy5mqFYgrebk/pvKQPyJM3USA/p2SU49kAtbvTNe/h8/XlIZQIGBDjglQl+ahn16+llh55edqiWClj9x9UGPSfmfv1tayhPeJBELMJrz/pDRyJGVGwmjl1KQ3p2MTJySqEjET12ZTl16dTBGD5uHSAIqHcFvvDT8pXLBgU5Nql04HEGBztDLBYhNjEXKZlFjX5+UkYhDt6v4X1xTNM+UGkaE1wiIi2XU1CGO+mFEInks8qfCHSERCzCrZR8JGU0rL2RKm6l5CG7oByG+pJmnc1eH10dCRZODsD0kfJb2X9G3sFHq8+guOzRK0+t23cdB88lQywC3poW1KwTqwz1dfD0oPsdFQ7ehFQqw/U7Ofhk7VlUVcsQ0qMjFj0XqNbFLmaP84aORISo2Eycv57x2OM13f/2UZw7mmHyUHlN8s+7ruDvKPmHBK8u1k2eCNgYQ0PkHxoPnU+uNektp6AMZ67K3+cRfZr3jomVmQF6espr6w+ea9wH2DvpBVi67jxkgnw5bS8X9fcPbklMcImItFzN6K27syXMTfRhbqKP4O7yX4L1jTipQ+T9yWXBXh2hr6taeUJTiUQiTBzsjndfCIa+ngSXbt7Dv5cdr7e91I4jtxT1hwsm+iPUR/WR54Ya1dcFZsZ6uJtdgnV/Xlfqtfv29J6PXBhDFfY2Jhg3QN6FYfUfVx85+z8jpwSZuaWQiEXo3koTnmfCuqFLJzMUlVZi60F5G7WWqL99UB9fexgb6CArtxQxt5U7lBw8J096vbpYocsjVl9Tl5pk+++oFFQ38I7Fn5F38MZ3x5F2rxhWZgZ4cUyP5g6z2THBJSLScv+UJ/zTNaGmTOHvCw37JdhYgiAgMlpef9vY7gnNIdTHHl/M74cO5gZIzSrGm98dr7XgxV9nkrDufjuymaO7q1zD2FiG+jp4+gn5KO7uY/EoLa9GD1drvDuzl8p1y4/z7BB3WJrqIz27BHuOJ9R7XE15gruzZYuOiDaGjkSM1ycF4MFB7paqv62hrytRLLv84GQzqVSGv+4vqTxSza3B6tPTyw4WpvrIL6547Ah9UWkllq4/j592xqCqWobg7nZY9sYT6GjdvN0nWgITXCIiLVZVLVXcYq65dQnIfwmam+ghv6hCqX2YutxOzUdWXhn09SQI9Gz58oS6uDla4OuFA+HubIGi0iosWXlK0TM0MiYdP/x+GQDwzKCueHpQtxaNrWYUFwDcnS2wZFYIDPSar1bTyEAXL4yWLzO87dAN5BaW13lcdCtqD/YoXZ0sMP7+hwQLU3242Ju3eAw1H4hOXbmr6MF8PjYT2QXlMDXSa9AqfuqgIxFjcE95Z4cDj+jscP1ODl77+ihOX7kLHYkIs8d54/0XQ1rVRMKmYIJLRKTFrifkoqxCCgtTfbg6/PNLX0cixhOB8l+ChxpZq9cQkfcnl/X0smvWRK2xrMwM8H+v9EN/fwdUSwUs334Z/90UpdStYMao7i0el4G+Dv49NQhjB7jiw9mhLTJa+kSgEzw6W6KsQlrnQhqCIChGcFtj/e3DpgzzxLND3OWjuWqsWW6oro4WcLU3R7VUhqMX5aU/+++P3g7t5Qy9FizTGXJ/AYqLcZnIKShT2ieVCdh26Abe+SES2fll6NTBGP99dQDGDnBr05PKHsYEl4hIi0Xdbw8W5Glb65d+Tf/O89czUFBcobZrCsI/izv0awXlCQ/T15Xg31OD8NyTHgCA45fSUC2VIdSnk9q6FajC390Ws8f5wNRIfUu4PopYLMLLT/lAJAKORKUgLilXaX9yZhHyiyqgpyuBZ+fm6SKhT
nq6Ekwb4dUiC5jUp6b+9eDZZGTklChalj24EEVLcLQ1RXcXK8ge6uyQU1CGJStPYdP+OMhkAp4IcsS3iwaiq5NFi8bXEpjgEhFpsbrqb2u42JvDzdEc1VIBx+tZaEAVCWkFuJtTAj1dCYI8NZdsPIpIJMKUYZ7499QgGBnoILi7Hf49NUit3QraAndnSwy5X4+9ctcVpQ4ANeUJ3V2smq0WWNs8EegIXR0xEu8W4offoyEI8nrg5l5RrS5De8lLJg7dn+QWFZuJ174+ipjb2TDQk2DRcwF4Y0pQq62tbiomuEREAKqqZfXWIbZVGTklSM0qhlgsgr973XWwNcnNITX2xI2MkZcnBHnaNmvPT3UYEOCIzR+PwJJZvdttEjdtpBeMDHRwOyVfqTdyzQIPvi3UT1YbmBjpIdRHvjjCpZvyDwgtPXpbo5+fPQz1dXA3pwT/t+4cPlp9BoUllXC1N8c3iwYirKezRuJqKUxwiajdK6+sxjsrTmLGR3/h8/XnVWqQ3hpduD9669XFCiaGdY/SDAhwhI5EjIS0AtxJL2jyNeXdE5q+uENLUncbrrbG0tRAUa6xITwWJWVVkEplii4TbaH+tjV5stc/3Tc6mBugV3fN3MUw0NfBgAAHAMDZa/JuCqP7ueC/r/WHo616V+Vrjdr3v2oiavdkMgHfbrmEG8l5AOSjjwv+ewTfbr2IzNxSDUfXNFFx8vq/R9UkmhnrIaRHRwDqGcVNyihCenYJdHXEil671PqN6usKR1sT5BdXYOvBG4hPK0BpeTWMDXTg5mih6fDaFJ+uHWBnJV+u98neXZSWX25pI0K7QEcihqmRLt6b2Qtzxvu26GQ3TWKCS0Tt2ua/4hAZkw4diQivTwpAb++OiokZcz8/hJW7YpDXBksXKqqkiKlpD/aYSTc1k82OXkhFVXXTeuKevN/7NtDDVmtr+7SRro4Ys8f5AAD2nkjAn5HypWW93Tq0u7rkphKLRXh9cgBG93PBuAGuGo3FzdECP7wVhp/fHYre3p00GktLa93FUUREzehIVAq2H7oJAHj1WX+E9XTGkF7OuJGUi03743D51j3sO3kHB88lY2x/Vzz9RFeYtNAM96a6cjsbldUydDA3QOeOj74dGehhC0tTfeQVVSAqNlNRQ6iKU/frb1vD4g7UOIGetgjp0RFnr2XgSJR85j3LE1Tj49YBPm6to3ZZExPcWgOO4BJRu3T9Tg6Wb78MAJg4uJvShAuPzlb4ZG4ffDq3DzycLVFRKcVvh2/hpc8OYvuhmyirqNZQ1A1XU38b5GX32LZXEokYg4Lko7iHm1CmkJxRiJTMYuhIxOjVvaPK5yHNmTXWW6km2a+VL/BAVB8muETU7mTklOCztecUvU+nDveq8zi/bjb472v98Z+ZvdC5oylKyquxcX8sXv6/Q9h7IgFV1dIWjrxhBEFQ9L9taE/QmjKFqNhM5Bep1hM38n7v2wAPGxjXM6mNWrdOHYwx/gk3AIClqT6c7LR/MhJpJ5YoEFG7UlJWhY9/OYvCkkq4OZrjX88FPnLVI5FIhBDvTujZvSNOXE7DrxFxuJtTgp93X8GtlDz8a0pQC0bfMGn3ipGRUwodiajBt5idO5rB3dkCN5PzcfRiKp4a6Nbo60ber79tK90TqG7PDnFHRaUUPl07aNXKVtS+cASXiNoNqVSGLzdFISWzCFZmBnj/xRAYNLBPq0QswhOBjvjh7TC8/JR8Ms6Jy2koLa9qzpBVEhUr757g7dqhUX1oB9/viXv4fDIEQXjM0crOXctAUkYRJGKRoisDtU0GejqY/ZRPu5uURNqFCS4RtRur91zFxbgs6OlK8P6LIbA2N2z0OXQkYozp7woHGxNUSwXFSmGtyYP1t40xwN9BsQpTfFrDeuJKZQI2R8Th07VnAcgnl7WViXhEpL2Y4BJRu/Bn5B3sOylvffTGlMAmr73e21s+SnnmakZTQ1OrsopqXE2QN+jv6VX36mX1MTHSU4zaNWSyWUFxBT5cdRpbD96AIMh7bi6cHND4oImI1IwJLhFpvYs3svDz7isAgOkjvdBHDTWiNa20omIzW9Vks+hb91AtFdDJ2hgONiaNfn7N0r3HLqY+8nXFJeZi4f+O4vLNe9DXk+BfUwLxygS/drvcLRG1LkxwiUirJWcU4osN5yGTCQjr6YQJYd3Uct5uTpawMjNAWUU1om9lq+Wc6hClKE+wVWmCkJ+7DazNDVBUWoVz12uXXwiCgD0n4rF4xUlkF5TDwcYEX782QNFmjIioNWCCS0Raq6C4Ap+sOYvS8mp0d7HCgol+apsVLhaLEKIoU7irlnM2lSAIivrbhrYHe5hELEJYT3myeuiccplCaXkVvtwYhVW7r0IqE9DXzx7/WzgAnTuZNS1wIiI1Y4JLRFqpqlqGpevPIyOnFHZWRnj3hV5qv30eer9e9ezVDEhljes60BySMoqQXVAOPV0JvJuwilJNN4WLN7KQe3+Z4qSMQvzr2+M4GZ0OiViE2eO88fa0nlyOl4haJfbBJSKtdCQqGdcScmBkoIMls0JgbqKv9mt4u3WAsYEO8osrcCMpF91drNV+jcaoKU/w7doB+rqqJ/MONibw6mKF2MRcHL2QAiszA3z/ezQqKqWwNjfA4unB8Oxipa6wiYjUjgkuEWkdQRCw90QCAOC5Jz3g3LF5bqHr6ogR3L0jjl5Mxekrd1tNgqtqecKDBgc7ITYxF78euIGKSvlkM/9uNnhzalCzfFggIlInligQkda5Ep+NpIwi6OtJMKRX52a9Vu8HyhQauziCOhWXVSE2MRcAEOTZuPZgdenn5wA9XYkiuZ001B0fvhzK5JaI2gSO4BKR1qnpdxsW5AQTw+atEQ30tIWujhh3c0qQlFGELhqacHX5ZhZkMgFOdiboaG3c5PMZG+piypMeOHoxFTNGdVfLqDARUUthgktEWiUrtxRn73c1GN3PpdmvZ6ivA393G5y/nokzV+9qLMFVtAfzVF8i+kxYNzyjprZqREQtiSUKRKRVwk/dgUwA/Lp1aLba24fVdFM4fUUz7cJkMgEX4rIAqKf+loiorWOCS0Rao7yyGgfOJgEARvdzbbHr9urREWIRkJBWgKzc0ha7bo2EtALkF1XAUF+i8YluREStARNcItIaxy6moai0CrZWRgju3rHFrmtuog+v+4mlJhZ9iIqTlyf4u8vrgYmI2jv+T0hEWkEQBOw7KW8NNqqPCyRi9axY1lChPvIyhTNXM1r0ukDz1N8SEbVlTHCJSCtcS8hB4t1C6OtJ8GSIc4tfv6Zd2LWEbBQUV6h8HqlMwIGzSYg4nYjoW/eQlVcK2SNWSSsorsDN5DwAQE+vprcHIyLSBuyiQERaYe/90dsnAh1hYqTX4te3szKCq705EtILcP56Job0Ui3J3nsiHr/suaa0TUciRkdrI3TqYIxO1sbyP+9/XU/IhSAALvZmsDY3VMdLISJq85jgElGbl5VXqigNaMnJZQ/r7d0RCekFOHP1rkoJbnZ+GX79Kw4A4NnZEkWlVcjMLUG1VIbUrGKkZhXX+1x2
TyAi+gcTXCJq8/afSoRMJsC3aweN9aEFgN4+nfDrgRu4dCML5RXVMNBv3H+xq/dcRVmFFJ6dLfHFgv4Qi0WQygRk55fhbnYx7maXID27BBk5JbibLf+qrJZBLBahv79DM70qIqK2hwkuEbVpFVVS/HWmpjVY8y/s8ChdOpnBzsoImbmluHgjC3187Rv83ItxWYiMTodYBLwywQ/i+5PkJGIR7KyMYGdlBH935efIZALyisoBgOUJREQP4CQzImrTTlxKRVFpJWwtDdGrBVuD1UUkEim6KZxuRLuwyiopftoVAwAY3d8VLvbmDXqeWCyCtbkhk1sioocwwSWiNksQBOw9cQcAMLKPCyQSzf+XVtNN4fz1TFRLZQ16zo6/b+NudgmszPTx/DDP5gyPiKhd0PxvAyIiFV2/k4uE9ALo6YgxNKSzpsMBAHh2sYK5iR5KyqpwNT77scffzS7Bb4dvAgBeGusDIwPd5g6RiEjrMcElojZL0RosyAlmxi3fGqwuErEIIT0atuiDIAj4aVcMqqpl8O9mg37+Da/ZJSKi+jHBJaI2KTu/DKevyOtcNT257GG9veW1wGeu3n3kIg2nr9zFxbgs6EjEmPuML0Sill19jYhIWzWpi0JxcTHS09NRWFgIQaj9n3hwcHBTTk9ErYRMJihm9bcW+0/LW4N5u1k3eFJWS/HrZgNDfQlyCspxOzUf7s6WtY4pq6jGqt1XAADPDOoKBxuTlg6TiEhrqZTg5uXl4ZNPPsGBAwcglUpr7RcEASKRCLGxsU0OkIg0KzWrCG8uO4EgD1ssfC4AujoSTYeEyiopIk4nAtDswg710dOVINDTDpHR6Thz9W6dCe7WAzeQXVAOOysjTBziXsdZiIhIVSoluO+//z7+/vtvTJs2DT179oSZmeYaqxNR8zoSlYKSsiocv5yG0opqvDMjGHq6mk1yT1xOQ2FJJTpYGKJ3D822BqtPqHcnRYI7fWR3pX1Jdwvxx/F4AMDL432gr+H3k4hI26iU4EZGRmLGjBl466231BpMUlISfvnlF0RHR+PWrVtwdXXFvn37lI6ZNm0azp07V+u54eHhcHNzU2s8RCRvd1UjKjYTn645i/deDNFYUiYIgmJy2cg+XVpFa7C69PSyg45EhJTMYqRmFcHR1hSAPP4fd8ZAKhPQ27ujxnv3EhFpI5V+MxgYGMDBQf3LQt66dQvHjh1D586dH5msBgYGYtu2bUpfjo6Oao+HqL3Lyi1F4t1CiEXAOzOCYaAnwaWb9/Dx6jMor6hu8vmrpTL5RKsbWShr4PniEvMQn1oAXR0xnmwlrcHqYmyoC9+uNgCgmAwHAH9fSMG1hBzo60kwe5yPpsIjItJqKo3gjh07FocOHcLzzz+v1mDCwsIwZMgQAMDixYtx9erVOo8zMzODv7+/Wq9NRLWduy5vc+XlYo0+vvYwN9HHR6tPI+Z2Nj5cfQZLZoWo3Lc1Ia0A3229hIT0AgDyVbm6OprD27UDerhZo7uLNUwMa597X01rsEBHmJvoq/jKWkZvn064eCMLZ69mYOJgdxSXVmLN3msAgMlDPWBrZaThCImItJNKCe6wYcNw/vx5zJo1C5MmTULHjh0hkdS+XdmjR49GnVcsbp23Gonaq7PX5AluzW30Hq7W+PjlPvhg1WlcS8jBh6vO4MPZvRuV5FZVy/Db4ZvYfugmpDIBJoa6MDLURVZuKW4m5+Nmcj52Hr0NkQhwsTeHt6s1vO8nvNVSGSJj0gG0zsllDwvp0RE/7ojGjeQ85BSUYduhmygoroSTnQnGDWBJFRFRc1EpwZ0yZYri76dOnaq1v7m7KJw7dw7+/v6QSqXw8/PD66+/zpZkRGpWWv7PSly9etgptnt2scInc/pgyc+nEZuYiyUrT+PDl0PrHG19WHxqPr7degmJdwsBAKE+nTDvGV9YmhogK68U1xJycDU+B9cSspF2rwQJaQVISCvAnhPyUVtzEz1IZQK6u1jB1aF1tQari5WZATycLRGXlIfNEXE4dD4ZADDvaT/o6vADPRFRc1EpwV26dKm642iw4OBgjBs3Dl26dEFWVhZ++eUXzJw5Exs3bkRAQIDG4iLSNpdu3EO1VIB9B2PFBKka7s6W+GxuH7y/8hRuJOfh/Z8i8fGcPjA1qns1sapqKbYdvInfjtyCTCbAzFgPc5/2RT8/e8XiBraWRrANMsKgICcAQG5h+f2ENxtXE3KQnFGEguJKAMDY/m1n9LO3dyfEJeXh4Dl5cvtEkCN8unbQcFRERNqt0QluZWUlzMzM4ODgAE9Pz+aI6ZFee+01pcdPPPEERo8ejR9++AGrVq1q8XiItFVN/W2vetpwuTla4LN5ffGfn07hdmoB3vsxEp/M6VOrLvZWSh6+23oJSRlFAIC+fvaYO94XFqaPrp+1MjNAf38H9PeXT2gtKK7A9Ts5qJYK6OPbqakvr8WE+nTCuj+vAwCMDXTw4ujGlW4REVHjNfoema6uLl5//XVcunSpOeJpNCMjIwwcOBDXrl3TdChEWkMqExAVK28P9qg2Vi725vi/V/rCwlQfd9IL8d6PkcgvqgAgH7XdEH4dby47gaSMIpib6OHt6T2xeHrwY5Pbupib6CPUxx79/R3a1JK29jYm6NJJ3it86ggvWJoZaDgiIiLt1+gRXJFIhC5duiAvL6854iGiVuBGUi4KSyphbKgLLxerRx7buaMZlr7SF+/9eApJGUV498eTmDm6B9b9eR3J90dt+/s7YM54n1bf9aC5vDMjGAnpBejra6/pUIiI2gWVZjnMmTMHmzdvRkJCgrrjabTS0lIcPXoUPj7sJ0mkLufud08I8rSFTgMWUnC0NcXS+X3RwcIQKZnF+PiXs0jOKIKFiT4WzwjGW9N6ttvkFpCP4vbza1sjz0REbZlKk8yio6NhYWGBMWPGoFevXnBwcICBQe3bbv/5z38add6ysjIcO3YMAJCWlobi4mJEREQAAHr16oWEhASsXr0aQ4cOhYODA7KysrB27Vrcu3cP3333nSovhYjqUFN/G9KIZXDtO5jIR3J/OoWs3FIMDHDE7Ke823ViS0REmiESBEFo7JMaMrlMlTZhqampGDx4cJ37NmzYgI4dO+Ljjz/GjRs3kJ+fD0NDQwQEBGDBggXw9fVt1LUeVHPNw4cPq3wOIm1xN7sELy89BIlYhE0fj2hQ+68HlZZXISuvTFF3SkREpA6NyddUGsGNi4tT5WmP5ejoiBs3bjzymF9++aVZrk1EcjWjtz1c615J7HGMDHTRpZNqq5sRERGpAzuNE5GSmvrb4Ed0TyAiImrNmOASkUJxWRWuJeQAUF69jIiIqC1RqUTB09OzQbOBm2upXiJSVlUtw5X4bHR1tICZcd2riTXExbhMSGUCHG1NYN/BRI0REhERtRyVEtz58+fXSnClUinS0tJw6NAhuLi4YNCgQWoJkIjqV1UtxcFzyfj9yC3cyyuDm6M5vn59ICRi1dpRnbsmX9yhMd0TiIiIWhuVEtxXX3213n1ZWVmYNGkSunTpompMRPQYFVVS/HUmETv/vo2
cgnLF9vjUAvwdlYIhvZwbfU6pVIYLcfIEl/W3RETUlqmU4D6Kra0tJk+ejB9++AGjR49W9+mJ2rWyimrsP5WIXcduK5bEtTY3wDODuqG0ogqb9sdh4/7r6OdnDwP9xv3zvp6Yi+KyKpga6cGzy6NXLyMiImrN1J7gAoChoSFSU1Ob49RE7VJpeRX2nbyD3cfiUVRaCQCwtTTEhMHuGBLsBF0dCaqqpTh0LhkZOaXYefQ2pgx7fL/qB9V0T+jpZatyiQMREVFroPYE9+bNm9i4cSNLFIjUoLi0EntPJOCPEwkoKasCAHSyNsbEwd0wqKeT0jK6ujoSvDCqBz7fcB47j97GsN6dYW1u2OBrnb/f/7YX62+JiKiNUynBDQsLq7OLQlFREYqKimBgYIAffvihycERtWeHziVh1R9XUVpeDQBwtDXBs0PcMcDfARJJ3R3++vh2glcXK8Qm5mLj/lgsnBzYoGulZhUh7V4JdCQiBHrYqu01EBERaYJKCW6vXr3qTHDNzc3h5OSEUaNGwcLCoqmxEbVbd7NLsOL3GFRLZejSyQzPDnFHH1/7x5YOiEQivDTOG298dxxHolIwup8rujpaPPZ6Nd0TvN06wMiAq5AREVHbplKC+/nnn6s7DiJ6wNp911AtlcHf3QYfzQ6FuBE1se7OlhgY4Ihjl1KxZs81fDavz2P7Vtcsz9uL3ROIiEgLqLSS2TvvvIPo6Oh698fExOCdd95ROSii9uzK7WycvnIXYrF8NLYxyW2N6aO8oKcjxpX4bMXksfoUlVYiNjEXAOtviYhIO6iU4O7atQvJycn17k9NTcXu3btVjYmo3ZLKBKz64woAYERoF3TuaKbSeWwtjTBuoBsAYM3ea6iqltV77IXYTMhkAjp3NIWdlZFK1yMiImpNVEpwHycrKwsGBgbNcWoirXboXBLupBfC2FAXzz3p0aRzTQjrBgsTfaRnl2D/6Tv1Hnfuurz+lqO3RESkLRpcg3vo0CEcPnxY8Xj79u04depUreOKiopw6tQpeHt7qydConaipKwKG/fHAgCmPOkBcxP9Jp3PyEAXzw/3xIrfo7H1wA2EBTnBxEhP6Ziq6n9WL2P9LRERaYsGJ7jx8fGIiIgAIJ+pHR0djatXryodIxKJYGRkhODgYCxevFi9kRJpue2HbqKguBIONiYY2ddFLecc2ssZ+04mICmjCNsO3cSsscofPK8n5KC0vBoWJvro5myplmsSERFpWoMT3Dlz5mDOnDkAAE9PT3z22WcYM2ZMswVG1J6kZxdjz4l4AMBL47yVFnBoColEjBfHeOODVaex72QCRvTpAvsOJor9Nd0TenrZcfUyIiLSGir9Fo2Li2NyS6RGa/ZcQ7VUQKCnLXp62an13IGetgj0tEW1VMC6fdcV2wVB+Kc9WA/1XpOIiEiTmjRMdPnyZaxcuRL/93//h8TERABAWVkZrl27hpKSEnXER6T1om/ew9lrGRCLRZg1pkezXOPFMT0gFgGnr9zFtYQcAEBKZhEyckqhIxHD352rlxERkfZQKcGtrKzEggUL8Nxzz+Gbb77Bxo0bcffuXfkJxWK8+OKL2LBhg1oDJdJGUqkMq/fIa9lH9ukCZxXbgj1O545mGNa7CwBg9Z6rkMkERfcE324dYKiv0povRERErZJKCe53332Ho0eP4sMPP0RERAQEQVDs09fXx/Dhw5U6LhBR3Q6cTULi3UKYGOriuSc9m/VaU4Z5wlBfB7dT8nHsUqpiAQh2TyAiIm2jUoL7559/YvLkyZg0aRLMzc1r7Xdzc0NKSkqTgyPSZsVlVdgUEQdAnnyaGes95hlNY2Gqj4mDuwEA1u27hrik+6uXMcElIiIto1KCm5OTAw+P+pvQSyQSlJeXqxwUUXuw7eANFJZUwsnOBCP6dGmRa44b4AZbS0PkFlZAEABXe3PYWBq2yLWJiIhaikoJbqdOnZCQkFDv/osXL8LZ2VnloIi0Xdq9Yuw9If83NGus+tqCPY6ergQzRnVXPA5m9wQiItJCKv1WHT16NLZu3YpLly4ptolE8h6a27dvx/79+/HUU0+pJUAibbRmzzVIZQJ6etkhyLNlk8z+/g7w7doBOhIxBvg7tOi1iYiIWoJKU6fnzp2L6OhoTJ06Fa6urhCJRFi6dCkKCgqQkZGBgQMH4oUXXlBzqETa4dKNLJy7ngGJWIQXm6kt2KOIRCJ8OLs3SsqqYWHatOWAiYiIWiOVElw9PT2sXr0ae/bswV9//QWZTIbKykp4eHhg4cKFGDdunGJEl4j+8WBbsFF9XeBkZ6qROHR1JLAwlWjk2kRERM1N5eaXIpEI48aNw7hx4+rcf/78eQQHB6scGJE2ijiThOSMIpga6eK5J+ufqElERESqU/vMlsOHD2Py5MmYPn26uk9N1KYVl1Zi8/22YM8P84SJUfO2BSMiImqvGjWCGxkZiQ0bNiA5ORnm5uYYPny4otb20KFD+PbbbxEfHw8LCwvMnz+/OeIlarMiziShqLQSTnamGB7aRdPhEBERaa0GJ7jHjh3D3LlzIQgCLC0tkZycjOjoaOTk5KCsrAybNm2Cs7MzlixZgqeffhr6+py8QlRDKhOw/9QdAMDTT3SFpIXaghEREbVHDU5wV69eDVtbW6xZswZubm4oKirCokWLsG7dOohEIrz//vuYPHkyJBJOXCF62IW4TGTllcHUSBf9A9iai4iIqDk1eBjp+vXreO655+Dm5gYAMDU1xcKFC1FVVYU5c+bg+eefZ3JLVI/wSPno7ZBenaGvy38nREREzanBCW5JSQns7e2VttU89vHxUW9URFrkbnYJLt7IAgCMYO0tERFRs2tUIeDDvW1rHuvq6qovIiItE3E6EYIABHraolMHY02HQ0REpPUa1UVh9+7diI6OVjyuqKiASCTC5s2bcfjw4VrH/+c//2l6hERtWEWVFAfPJQEARvVx0XA0RERE7UOj24RFRkbW2n7o0KFa20QiERNcavdOXk5DUWkVbC0NEeRlp+lwiIiI2oUGJ7hxcXHNGQeRVgq/3xpseGgXSMRcvpqIiKglsBknUTO5lZKHm8n50JGIMbRXZ02HQ0RE1G4wwSVqJuGRiQCAfn72sDDlwidEREQthQkuUTMoKq3E8UupAICRnFxGRETUopjgEjWDw+eTUVktg6u9OTy7WGo6HCIionaFCS6RmslkAsJPJQIARvbtUqt/NBERETUvJrhEanb51j3czS6BkYEOBgY4ajocIiKidqfJCW5WVhbi4uJQWlqqjniI2rzwSHlrsMHBzjDQb1SraSIiIlIDlRPcQ4cOYfjw4Rg4cCDGjx+vWOEsNzcXTz31VJ2LPxBpu6zcUpy/ngEAGNmni2aDISIiaqdUSnCPHDmCV199FZaWlpg/fz4EQVDss7Kygp2dHXbs2KG2IInaiogziZAJgF+3DnC0NdV0OERERO2SSgnuihUr0LNnT2zZsgXPP/98rf3+/v6IjY1tcnBEbUlVtRQHzyYDYGswIiIiTVIpwb116xZGjBhR7/4OHTogJydH5aCI2qLImLvIL6
6AtbkBQnp01HQ4RERE7ZZKCa6hoSHKysrq3Z+SkgILCwtVYyJqk2omlw0P7QKJhA1KiIiINEWl38IhISHYvXs3qqura+27d+8etm/fjn79+jU5OKK24k56AWITcyERi/BkSGdNh0NERNSuqZTgLly4EBkZGZgwYQK2bdsGkUiEkydP4ptvvsGYMWMgCALmz5/f6PMmJSVhyZIlGDduHLp3747Ro0fXedxvv/2GYcOGwcfHB2PHjsXff/+tyssgUpuahR1CfTrBysxAs8EQERG1cyoluK6urvj1119hYWGB7777DoIg4JdffsHKlSvh7u6OX3/9FY6OjW9wf+vWLRw7dgydO3eGm5tbncf8+eefeP/99zFixAisWrUK/v7+WLBgAS5fvqzKSyFqspKyKhy9kAIAGNmXk8uIiIg0TeUu9N26dcO6detQUFCApKQkCIIAJycnWFlZqRxMWFgYhgwZAgBYvHgxrl69WuuYZcuWYdSoUVi4cCEAoHfv3rh58yZWrFiBVatWqXxtIlUdiUpBeaUUzh1N4e1qrelwiIiI2r0mz4QxNzeHr68v/Pz8mpTcAoBY/OhwUlJSkJiYWKuDw8iRI3H69GlUVlY26fpEjSUIAsJPySeXjezjApFIpOGIiIiIqEEjuLt371bp5E899ZRKz6tPQkICAMDFRfk2sJubG6qqqpCSklJvaQNRc7gSn43UrGIY6kswKKjxZTlERESkfg1KcBcvXlxrW81I1YOrmD24HVB/gltQUAAAMDMzU9pe87hmP1FLkMkE7DhyGwDwRJATjAx0NRwRERERAQ1McA8fPqz0uKioCG+//TZMTU0xdepUxYhqQkICNm3ahJKSEnz++efqj5aolRAEAT/vvoKLN7IgEYswmpPLiIiIWo0GJbgODg5Kj9955x1YWVlhzZo1SiO2Hh4eGDZsGF588UWsX78eS5cuVWuw5ubmAOQJto2NjWJ7YWGh0n6i5rblwA38GXkHIhGw6LlAOHc0e/yTiIiIqEWoNMns0KFDGDJkSJ0TasRiMYYOHVpr1FcdXF1dAfxTi1sjISEBurq6cHJyUvs1iR6250Q8thy4AQCYM94XAwNZe0tERNSaqJTgCoKAO3fu1Ls/Pj6+Vm2uOjg5OaFLly6IiIhQ2h4eHo7Q0FDo6emp/ZpEDzp6IQWrdsvb1z0/3BOjWJpARETU6qjUB3fIkCHYsmULHBwcMHnyZBgaGgIAysrKsGXLFmzbtg1jxoxp9HnLyspw7NgxAEBaWhqKi4sVyWyvXr1gZWWFV199FW+++SacnZ0REhKC8PBwxMTEYNOmTaq8FKIGO389A99svQQAGNPfFZOGuGs4IiIiIqqLSFBhqLWoqAjz5s1DVFQUdHR0YGtrCwDIyspCdXU1AgMD8dNPP9XqdvA4qampGDx4cJ37NmzYgJCQEADypXpXrVqF9PR0uLi44F//+hcGDRrU2JehUHPN5iirIO1wLSEHS1aeQmW1DE8EOWLR5ECIxex5S0RE1FIak6+plODWOHToEI4fP4709HQAgL29PQYOHIiwsLA21fCeCS49yp30Aryz4iRKyqsR3N0O777QCzqSJq+RQkRERI3QmHxN5aV6AXmpQs3SukTaKD27GEt+Po2S8mr0cLXG29ODmdwSERG1ck1KcEtLS3H+/HmkpaUBkLcTCw4OhpGRkVqCI9KknIIyvL/yNPKLKuBib4b3XwyBvq5E02ERERHRY6ic4G7cuBHffvstSktLlTomGBsbY9GiRZg6dapaAiTShKLSSiz5+TSyckvRqYMxPno5FMaGXKmMiIioLVApwd29ezc+++wz+Pv7Y/r06Ur9aTdu3IjPPvsMJiYmal+ql6gllFdU46PVZ5CcUQQrMwN8MqcPLE0NNB0WERERNZBKCe7atWsRHByMdevWQSL555atp6cnhg0bhhdeeAFr165lgkttTrVUhqXrz+NGUh5MDHXx8cuhsLNiyQ0REVFbotJsmTt37mD48OFKyW0NiUSC4cOHP3IhCKLW6tjFVFy8kQV9PQk+mN0bnTtxCV4iIqK2RqUE19TUFKmpqfXuT01NhYmJicpBEWnK8cvyCZMTwrrBs7OVhqMhIiIiVaiU4A4cOBCbNm3Cn3/+WWtfeHg4Nm/e3KSFF4g0obCkEtE37wEA+vs7aDgaIiIiUpVKNbhvvvkmLl++jDfffBOff/45unTpAgBITExEdnY2XF1d8cYbb6gzTqJmd+bqXUhlAlztzeFgwzsQREREbZVKCa6VlRV27dqFrVu3Kq1k5u7ujtmzZ2PSpEnQ19dXa6BEze3k/fKEfv72Go6EiIiImkLlPrj6+vqYMWMGZsyYoc54iDSioLgC0bezAQB9/ZjgEhERtWVNWsnsQYIg4MyZM6isrERQUBAnmVGbcvrKXchkAtwczWHfgT+7REREbZlKCe4333yDixcvYuPGjQDkye2LL76IM2fOQBAE2NvbY926dXB2dlZrsETN5WT0/fIEP04uIyIiautU6qLw119/wdfXV/E4IiICp0+fxsKFC7Fy5UpIpVIsX75cbUESNaf8ogpcuV+e0I/lCURERG2eSiO4mZmZ6Ny5s+LxwYMH0bVrV8yZMwcA8Nxzz2HLli3qiZComZ2+kg6ZAHRzskBHa2NNh0NERERNpNIIro6ODiorKwHIyxNOnz6N/v37K/ZbW1sjLy9PPRESNbMTl+VdQFieQEREpB1USnC7deuGPXv2oKCgADt27EB+fj4GDhyo2J+eng5LS0u1BUnUXPIKy3E1geUJRERE2kSlEoX58+dj7ty56N27NwAgMDBQ8XcAOHbsGHx8fNQTIVEzOhWTDkEAPDpbwtbKSNPhEBERkRqolOD27dsXu3btQmRkJMzMzDBy5EjFvoKCAvTs2RODBw9WW5BEzeVENMsTiIiItI3KfXC7du2Krl271tpubm6Od999t0lBEbWEnIIyXL+TAwDo68vyBCIiIm2hUg0ukTaIvF+e4NXFCjaWhpoOh4iIiNSkQSO4np6eEIvFuHz5MvT09ODp6QmRSPTI54hEIly/fl0tQRI1h5OK7gkcvSUiItImDUpw58+fD5FIBB0dHaXHRG1Vdn4ZYhNzIRIBfZngEhERaZUGJbivvvrqIx8TtTWRMfLR2+4u1rA2Z3kCERGRNmENLrVLJy6nAWB5AhERkTZSuYtCbm4uVq1ahWPHjiEtTZ4sODg4YODAgZg1axY6dOigtiCJ1CkrtxQ3kvIgEgF92D2BiIhI66g0gnvr1i2MGTMGa9euhampKYYPH47hw4fD1NQUa9euxdixY3Hz5k11x0qkFjXlCd6uHWBlZqDhaIiIiEjdVBrB/fjjjyGVSrF9+3b4+voq7YuJicHs2bPxySefYOPGjWoJkkidTkbfL0/w5+gtERGRNlJpBDcmJgbTp0+vldwCgK+vL6ZPn46YmJgmB0ekbhk5JbiZnA+xCAj16aTpcIiIiKgZqJTgWltbQ19fv979+vr6sLa2VjkoouYSeX9pXm+3DrA0ZXkCERGRNlIpwZ0+fTq2bNmCe/fu1
dqXmZmJLVu2YPr06U0OjkjdasoT+vs7aDgSIiIiai4q1eAKggAjIyM8+eSTGDJkCDp37gwASExMxOHDh+Hs7AxBELB27VrFc0QiEV544QW1BE2kirvZJbidWgCxWMTyBCIiIi2mUoL7xRdfKP6+d+/eWvtv3LihdAzABJc0r2b01rdrB5ib1F9iQ0RERG2bSgnu4cOH1R0HUbM7eVlef9vPj+UJRERE2kylBNfBgQkCaY4gCIi5nQ0bC0PY25g06Dlp94qRkF4ACcsTiIiItF6DE9yYmBg4OzvDwsLiscempKTgwoULeOqpp5oQGlHddh29jbX7rgMAujpZYGCAA/r7O8Da3LDe59SUJ/i528DMWK9F4iQiIiLNaHAXhUmTJuHEiROKx/n5+fDz88O5c+dqHXvp0iW888476omQ6AE3knKxITwWACASAbdT8vHLnmuY+ckBvPtDJCJOJ6KotLLW82rKE/r7cXEHIiIibdfgEVxBEGo9rqiogFQqVXtQRHUpLqvCl5suQCoT0M/PHnPG+yIyJh3HLqYiNjEXV+KzcSU+Gyt3xSDAwxYDAhwR0qMjsvPLkHi3EDoSEXp7szyBiIhI26lUg0vU0gRBwPe/XUZWbilsrYywYKI/jA11MaqvC0b1dUFWbimOX07D8UupuJNeiPPXM3H+eib09SSwszICAPi728LEiOUJRERE2o4JLrUJB84mITI6HRKxCG9NDYKxoa7SflsrI0wI64YJYd2QnFGI45fScPxSGu7mlCA5owgA0N+f5QlERETtARNcavWSMgrx864rAIDpI73g0dnqkcc7dzTD1BFmeH64J26l5OP4pTRUVUu5ehkREVE70agENy0tDdeuXQMAFBXJR8WSkpJgZmamdFxqaqqawqP2rryyGl9ujEJltQyBHrZ4amDXBj9XJBLB3dkS7s6WzRghERERtTaNSnC/++47fPfdd0rbPvroo1rHCYIAkUjUtMiIAKz+4yqSM4pgaaqPRc8FQizmzxURERE9WoMT3KVLlzZnHES1nLichr/OJEEkAv41JRAWplxel4iIiB6vwQnu+PHjmzMOIiUZOSX4/rfLAIAJYd3g726r2YCIiIiozWjwQg9ELaVaKsN/N0WhtLwaXl2sMGWYp6ZDIiIiojaECS61Opv2x+Jmcj6MDXXx5vNB0JHwx5SIiIgajpkDtSoX47Kw4+/bAIDXnvWH7f1FGoiIiIgaigkutRq5heX435YLAICRfbqgjy8XZiAiIqLGY4JLrYJMJuB/v15AQXElunQyw4tjvTUdEhEREbVRXMmMNK6ySooN4bGIvpUNfT0J3prWE/q6Ek2HRURERG1Um0twd+7ciXfeeafW9tmzZ+PNN9/UQESkKkEQcPJyOtaFX0dWbikAYM5TPnCyM9VwZERERNSWtbkEt8bq1athavpPImRnZ6fBaKixYu/k4pe9V3EjKQ8AYG1ugBmjumNQkJOGIyMiIqK2rs0muD169ICVlZWmw6BGysgpwbo/ryMyOh0AYKAnwTNh3fDUQDcY6LXZH0ciIiJqRZhRUIsoLq3EtkM3se/kHVRLZRCLgCG9OuP54Z6wMjPQdHhERESkRdpsgjt69Gjk5eXB3t4ezz77LF566SVIJJyY1NpUS2UIP3UHWw/cQFFpFQDA390GL47pARd7cw1HR0RERNqozSW4NjY2ePXVV+Hn5weRSIQjR47g22+/RWZmJpYsWaLp8OgBZ6/exZq915CeXQIAcLIzxYtjeiDI0xYikUjD0REREZG2anMJbv/+/dG/f3/F4379+kFfXx/r16/H3LlzYWtrq8HoqEZUbCY+XXsOAGBhoo8pwz3xZC9nSLjsLhERETUzrcg2RowYAalUitjYWE2HQvftOipfbndAgANWvjMYI0K7MLklIiKiFsGMg9QuKaMQMbezIRYBL4zqASMDXU2HRERERO2IViS44eHhkEgk6N69u6ZDIQDhkXcAACHenWBjaajhaIiIiKi9aXM1uLNmzUJISAg8PDwAAIcPH8b27dsxffp02NjYaDg6KimrwpGoFADAqL4uGo6GiIiI2qM2l+C6uLhgx44dyMjIgEwmQ5cuXfDuu+9i2rRpmg6NAByJSkF5pRROdibw7dpB0+EQERFRO9TmEtz//Oc/mg6B6iGTCfgzMgEAMKqvK1uBERERkUZoRQ0utQ7Rt+4h7V4JDPV1MCjIUdPhEBERUTvFBJfU5s/7k8sGBzuxcwIRERFpDBNcUovM3FKcv54BABjZh5PLiIiISHOY4JJa7D91BzIB8O9mAyc7U02HQ0RERO0YE1xqsooqKQ6cTQYAjOrH0VsiIiLSLCa41GQnLqWhqLQSNpaGCO7eUdPhEBERUTvHBJeaRBD+aQ02so8LJGK2BiMiIiLNYoJLTXIzOQ+3UwugqyPG0F7Omg6HiIiIiAkuNc2++63B+vs7wNxEX8PREBERETHBpSbIKyrHycvpAIDRnFxGRERErQQTXFLZgbNJqJbK4OFsiW5OlpoOh4iIiAgAE1xSkVQqQ8SpRABsDUZEREStCxNcUsnZaxnILiiHuYke+vnZazocIiIiIgUmuKSSP+9PLnsypDN0dSQajoaIiIjoH0xwqdGSMgoRczsbYhEwIpTlCURERNS6MMGlRqsZvQ3x7gQbS0MNR0NERESkjAkuNUpJWRX+jkoBAIzqy9FbIiIian2Y4FKjHIlKQXmlFE52JvDt2kHT4RARERHVwgSXGkwmE/BnZAIAYFRfV4hEIg1HRERERFQbE1xqsOhb95B2rwSG+joYFOSo6XCIiIiI6sQElxpszwn56O3gYCcYGehqOBoiIiKiujHBpQaJvnUPUbGZEIs4uYyIiIhaNya49FhSqQyrdl8BAIzo4wJHW1MNR0RERERUPya49Fj7TyciKaMIpka6eH64p6bDISIiInokJrj0SAXFFdgUEQcAmDrCC6ZGehqOiIiIiOjRmODSI22OiENJWRVc7M0wrHcXTYdDRERE9FhMcKleCWkFiDiTCAB4+SkfSMTse0tEREStHxNcqpMgCPh59xUIAtDf3wHebly1jIiIiNoGJrhUpxOX03AtIQd6uhLMHN1D0+EQERERNRgTXKqlvKIaa/ZeAwBMHNwNNpaGGo6IiIiIqOGY4FItvx+5hZyCcthaGWH8E101HQ4RERFRozDBJSUZOSXYefQ2AGDWmB7Q15VoOCIiIiKixmGCS0rW7L2GqmoZ/Lp1QKhPJ02HQ0RERNRoTHBJ4dKNLJy+chdisQizn/KBSMS2YERERNT2MMElAEC1VIZVf1wBAIzq64LOHc00HBERERGRapjgEgAgPPIOUjKLYWashylPemg6HCIiIiKVMcElFBRX4Ne/4gAA00Z4wcRIT8MREREREamOCS5h4/5YlJRXw9XBHENDOms6HCIiIqImYYLbzt1OyceBs0kAgDnjfSARc2IZERERtW1McNsxQRDw8+4rEARgYIAjurtYazokIiIioiZjgtuO7T2RgNjEXBjoSTBzTHdNh0NERESkFkxw26nLN7Pwy95rAIBp
I71gbW6o4YiIiIiI1IMJbjuUnl2MLzZEQSYTENbTCWP6uWo6JCIiIiK1YYLbzpSWV+HTNedQXFYFD2dLzJ/gxxXLiIiISKswwW1HZDIBX2++iJTMIliZGeDdmb2gpyvRdFhEREREasUEtx3ZFBGLc9czoKsjxnsze8HKzEDTIRERERGpHRPcduL4pVT8dvgWAOC1Z/3h7myp4YiIiIiImgcT3Hbgdko+vtt2GQDwzKCueCLISbMBERERETUjJrhaLq+wHJ+tPYvKKil6etlh2kj2uyUiIiLtxgRXi1VVS7F0/XlkF5TDwcYEbz4fxKV4iYiISOsxwdVSgiDgxx0xiE3MhbGBDt6fFQJjQ11Nh0VERETU7NpkghsfH4+ZM2fC398fffv2xZdffonKykpNh9Wq7D2ZgIPnkiEWAW9NC4aDjYmmQyIiIiJqETqaDqCxCgoKMGPGDHTp0gXLly9HZmYmPv/8c5SXl2PJkiWaDq9VuHwzC7/skS/DO3NMDwR62mo4IiIiIqKW0+YS3K1bt6KkpATff/89LCwsAABSqRQfffQR5syZAzs7O80GqEFSmYDE9AKlZXjHDXDTdFhERERELarNJbjHjx9HaGioIrkFgBEjRuCDDz5AZGQknn76ac0F1wKqpTJk5ZXibnaJ/CunRPH3jJxSVEtlAMBleImIiKjdanMJbkJCAp555hmlbWZmZrCxsUFCQoKGomqaqmopikqrUFRaiWLFn5UoLqtCUWkVCksqkZkjT2az8sogkwn1nktHIoJHZyu8Na0nl+ElIiKidqnNJbiFhYUwMzOrtd3c3BwFBQUaiKjxDpxNwt4TCSgurURRWRUqKqWNer6ergSdrI3QqYMxOnUwUfp7BwtDtgIjIiKidq3NJbja4MTlNCTeLVTaJhYBxoZ6MDXShamRHkyMdGFy/7GJkR5sLQ3vJ7HGsDQ1gJhJLBEREVGd2lyCa2ZmhqKiolrbCwoKYG5uroGIGu/t6cGIT82HsYGuPJE10oORvg6TViIiIiI1aHMJrqura61a26KiIty7dw+urq4aiqpxTAx14dfNRtNhEBEREWmlNrfQw4ABA3Dq1CkUFv5ziz8iIgJisRh9+/bVYGRERERE1Bq0uQR38uTJMDY2xvz583Hy5Ens2LEDX375JSZPntyue+ASERERkVybS3DNzc2xfv16SCQSzJ8/H19//TUmTJiAxYsXazo0IiIiImoF2lwNLgC4ublh3bp1mg6DiIiIiFqhNjeCS0RERET0KExwiYiIiEirMMElIiIiIq3CBJeIiIiItAoTXCIiIiLSKkxwiYiIiEirtMk2YeqWlZUFqVSKwYMHazoUIiIiIqrD3bt3IZFIGnQsR3AB6OvrQ0eHuT4RERFRa6WjowN9ff0GHSsSBEFo5niIiIiIiFoMR3CJiIiISKswwSUiIiIircIEl4iIiIi0ChNcIiIiItIqTHCJiIiISKswwSUiIiIircIEl4iIiIi0ChNcIiIiItIqTHCJiIiISKswwSUiIiIircIEl4iIiIi0ChNcIiIiItIqTHBbUHx8PGbOnAl/f3/07dsXX375JSorKzUdVruUlJSEJUuWYNy4cejevTtGjx6t6ZDapf3792PevHkYMGAA/P39MW7cOPz+++8QBEHTobU7x44dw9SpU9G7d294e3tj8ODBWLp0KYqKijQdWrtWUlKCAQMGwMPDA1euXNF0OO3Kzp074eHhUevrq6++0nRo1AA6mg6gvSgoKMCMGTPQpUsXLF++HJmZmfj8889RXl6OJUuWaDq8dufWrVs4duwY/Pz8IJPJmFBpyLp16+Dg4IDFixfD0tISp06dwvvvv4+MjAwsWLBA0+G1K/n5+fD19cW0adNgYWGBW7duYfny5bh16xbWrFmj6fDarR9++AFSqVTTYbRrq1evhqmpqeKxnZ2dBqOhhmKC20K2bt2KkpISfP/997CwsAAASKVSfPTRR5gzZw7/wbSwsLAwDBkyBACwePFiXL16VcMRtU8//vgjrKysFI9DQ0ORn5+PtWvX4pVXXoFYzJtMLWXcuHFKj0NCQqCnp4f3338fmZmZ/D9KA+Lj4/Hrr7/i7bffxgcffKDpcNqtHj16KP0/RW0Df3u0kOPHjyM0NFSR3ALAiBEjIJPJEBkZqbnA2ikmTq1DXb80vLy8UFxcjNLSUg1ERA+q+f+qqqpKs4G0U59++ikmT54MFxcXTYdC1Obwt3wLSUhIgKurq9I2MzMz2NjYICEhQUNREbU+Fy5cgJ2dHUxMTDQdSrsklUpRUVGBa9euYcWKFQgLC4Ojo6Omw2p3IiIicPPmTcyfP1/TobR7o0ePhpeXFwYPHoyVK1eyZKSNYIlCCyksLISZmVmt7ebm5igoKNBAREStT1RUFMLDw/H2229rOpR2a9CgQcjMzAQA9O/fH19//bWGI2p/ysrK8Pnnn2PRokX8oKdBNjY2ePXVV+Hn5weRSIQjR47g22+/RWZmJufOtAFMcImoVcjIyMCiRYsQEhKC6dOnazqcduvnn39GWVkZbt++jR9//BFz587F2rVrIZFINB1au/Hjjz/C2toazzzzjKZDadf69++P/v37Kx7369cP+vr6WL9+PebOnQtbW1sNRkePwxKFFmJmZlZnu52CggKYm5trICKi1qOwsBCzZ8+GhYUFli9fzhppDfL09ERAQAAmTpyIH374AWfPnsXBgwc1HVa7kZaWhjVr1uC1115DUVERCgsLFfXopaWlKCkp0XCE7duIESMglUoRGxur6VDoMTiC20JcXV1r1doWFRXh3r17tWpzidqT8vJyzJkzB0VFRdi2bZtSOx7SLA8PD+jq6iI5OVnTobQbqampqKqqwssvv1xr3/Tp0+Hn54ft27drIDKitoUJbgsZMGAAfvrpJ6Va3IiICIjFYvTt21fD0RFpRnV1NRYuXIiEhARs3ryZrahamejoaFRVVXGSWQvy8vLChg0blLbFxsZi6dKl+Oijj+Dj46OhyAgAwsPDIZFI0L17d02HQo/BBLeFTJ48GRs3bsT8+fMxZ84cZGZm4ssvv8TkyZP5S10DysrKcOzYMQDyW4LFxcWIiIgAAPTq1Ys9D1vIRx99hL///huLFy9GcXExLl++rNjXvXt36OnpaS64dmbBggXw9vaGh4cHDAwMEBcXh19++QUeHh6KntHU/MzMzBASElLnvh49eqBHjx4tHFH7NWvWLISEhMDDwwMAcPjwYWzfvh3Tp0+HjY2NhqOjxxEJXMKpxcTHx+OTTz7BpUuXYGxsjHHjxmHRokX8Ja4BqampGDx4cJ37NmzYUO8vGFKvsLAwpKWl1bnv8OHDHDlsQT///DPCw8ORnJwMQRDg4OCAoUOHYtasWZzJr2Fnz57F9OnT8fvvv3MEtwV9+umnOHHiBDIyMiCTydClSxdMnDgR06ZNg0gk0nR49BhMcImIiIhIq3CqMhERERFpFSa4RERERKRVmOASERERkVZhgktEREREWoUJLhERERFpFSa4RERERKRVmOASERERkVZhgktE1AyWL1+uWAG
pNdi5cyc8PDwUX7m5uU0+59mzZ+Hh4YGzZ8+qIULNio2NVXp/alY2JKK2iUv1ElGrtXPnTrzzzjuKxxKJBNbW1ujbty8WLVrU4stcP2rltQctXbq0BaJRzTvvvANLS0ul1ckWL16MXbt2wdjYGKdOnYKBgYHScxITEzFs2DAAwFtvvYVZs2a1aMx79+5FTk4OXnjhhWa7hr29Pb788kskJCTgp59+arbrEFHLYIJLRK3ea6+9BkdHR1RWVuLy5cvYtWsXLly4gH379kFfX7/F4nj33XdRUlKieHz8+HHs27dPkTTWCAwMxNixY/Hyyy+3WGwNNWTIkDqXQNbR0UF5eTmOHDmCkSNHKu3bu3cv9PX1UVFRobQ9ODgYMTEx0NXVbdaY9+3bh1u3bjVrgmtubo5x48bh7NmzTHCJtAATXCJq9QYMGAAfHx8AwMSJE2FpaYlVq1bh8OHDtZKx5jRkyBClx9nZ2di3b98jk8a2Qk9PD4GBgfjzzz9rvaf79u3DE088gb/++ktpu1gsbtEPGEREDcUaXCJqc3r27AkASElJUWybNm0apk2bVuvYxYsXIywsTPE4NTUVHh4e+OWXX7Bt2zYMGTIE3t7eeOaZZxATE6O2GOuqwfXw8MDHH3+M/fv3Y+TIkfD19cWkSZNw48YNAMDWrVsxdOhQ+Pj4YNq0aUhNTa113ujoaMyaNQtBQUHw8/PD1KlTceHCBbXEPHr0aBw/fhyFhYWKbTExMUhMTMTo0aNrHV9XDe60adMwevRo3L59G9OmTYOfnx/69++PVatWKT23pib44df48DmnTZuGo0ePIi0tTVEf++D3s7KyEsuWLcPQoUPh7e2NgQMH4ssvv0RlZaXSeSMjI/Hcc8+hZ8+eCAgIwLBhw/C///1P9TeLiFq1tjO8QER0X00drJmZmcrn2LdvH0pKSjBp0iSIRCKsXr0ar776Kg4dOtSst9yjoqJw5MgRTJkyBQDw888/Y+7cuXjppZfw66+/YsqUKSgoKMDq1avx7rvvYsOGDYrnnj59GrNnz4a3tzcWLFgAkUiEnTt3YsaMGfj111/h6+vbpNiGDh2KDz74AAcOHMCECRMAyN8nV1dXdO/evcHnKSgowEsvvYShQ4dixIgR+Ouvv/DVV1/B3d0dAwcObFRMc+fORVFRETIyMhT12MbGxgAAmUyGefPm4cKFC3j22Wfh5uaGmzdvYv369UhMTMQPP/wAALh16xbmzJkDDw8PvPbaa9DT00NSUhIuXrzYqFiIqO1ggktErV5xcTFyc3NRWVmJ6OhofP/999DT08OgQYNUPmd6ejoOHDgAc3NzAICLiwteeeUVnDx5sknnfZw7d+5g//79ipIGc3NzLFmyBD/++CMiIiIUk79kMhlWrlyJ1NRUODo6QhAEfPjhhwgJCcHq1ashEokAAJMnT8aoUaPw7bffYs2aNU2KzcTEBE888QT27duHCRMmQCaTITw8HJMnT27UebKysvDFF1/gqaeeAgBMmDABYWFh2LFjR6MT3L59+2LDhg0oLCzEuHHjlPbt3bsXp06dwsaNGxWj+gDQrVs3fPDBB7h48SICAwMRGRmJqqoqrFq1ClZWVo26PhG1TUxwiajVe3hykYODA/773/+iY8eOKp9z5MiRiuQWqLvsoTmEhoYq1ev6+fkBAJ588kmlzgY1o7EpKSlwdHREbGwsEhMTMW/ePOTl5dU65x9//AGZTAaxuGmVZ2PGjMHrr7+Oe/fu4datW7h37x7GjBnTqHMYGRkpJaN6enrw8fFR+3sbEREBNzc3uLq6KrU96927NwB5uUNgYKBipP/w4cN45plnmvweEVHrxwSXiFq9JUuWwMXFBUVFRdixYwfOnz8PPT29Jp2zU6dOSo9rkt0H60+bw8PXrUlqH07WTU1NleJJTEwEALz99tv1nruoqEgpaVfFwIEDYWxsjPDwcMTFxcHHxwedO3eusx64Ph07dlSMMNcwNzdX1BqrS1JSEuLj4xEaGlrn/pycHADyDzO//fYb/vOf/+Drr79GaGgohg4diuHDhzPZJdJSTHCJqNXz9fVVdFEYMmQIpkyZgjfeeAMRERGKesz6SKXSOrdLJJI6twuC0LRgH6O+6z4unpo/33rrLXh5edV5rJGRUZPj09PTw9ChQ7F7926kpKRgwYIFjT5Hfa/lQQ8nwDVkMlmDryOTyeDu7q7UK/lBNR8aDAwMsHnzZpw9exZHjx7FiRMnEB4ejm3btmHNmjUNipeI2hYmuETUpkgkEvzrX//C9OnTsXnzZkWvWXNz8zpvgaenp7d0iM3CyckJgHzEt0+fPs16rTFjxmDHjh0Qi8UYNWpUs1yjpmygqKhIaXtdC2nUlww7OzsjLi4OoaGh9R5TQywWIzQ0FKGhoXjnnXfw008/4ZtvvsHZs2eb/f0kopbHezNE1OaEhITA19cX69evVyw+4OTkhISEBKVazLi4OK2ZKe/t7Q1nZ2esWbNGabGJGupYerdGSEgIXn/9dbz//vuwsbFR23kf5OzsDAA4f/68YptUKsX27dtrHWtoaFgrEQaAESNGIDMzs87nlJeXo7S0FACQn59fa3/NKPjD7cSISDtwBJeI2qRZs2bh9ddfx86dO/Hcc89hwoQJWLduHWbNmoUJEyYgJycHW7duRdeuXetMCNsasViMTz/9FLNnz8bo0aPx9NNPw87ODpmZmTh79ixMTEzUtgKXWCzGK6+8opZz1adbt27w9/fH//73PxQUFMDc3Bzh4eGorq6udWyPHj0QHh6OpUuXwsfHB0ZGRggLC8O4ceOwf/9+fPDBB4oJZVKpFAkJCYiIiMDq1avh4+ODFStWICoqCgMHDoSDgwNycnLw66+/omPHjggKCmrW10lEmsEEl4japCeffFIxolnTA/WLL77AsmXLsHTpUnTt2hVffvkl9u3bh3Pnzmk6XLUICQnBtm3b8MMPP2DTpk0oLS2FjY2NYsGItuarr77CkiVL8PPPP8PMzAwTJkxASEgIZs6cqXTclClTEBsbi507d2LdunVwcHBAWFgYxGIxVqxYgXXr1uGPP/7AwYMHYWhoCEdHR0ybNg0uLi4AgLCwMKSlpWHHjh3Iy8uDpaUlevXqhVdffVUxmY+ItItIaO4ZFUREpHE7d+7EO++8g127dqFjx46wtLR8bN1qeyKVSlFQUICLFy9i/vz5+O677zB8+HBNh0VEKuIILhFROzJ+/HgA8lXRuOjBP27evKlYmIKI2j6O4BIRtQNZWVm4ffu24nFwcHCzLknc1pSUlCA6Olrx2MPDA9bW1hqMiIiaggkuEREREWkVtgkjIiIiIq3CBJeIiIiItAoTXCIiIiLSKkxwiYiIiEirMMElIiIiIq3CBJeIiIiItAoTXCIiIiLSKkxwiYiIiEirMMElIiIiIq3CBJeIiIiItMr/A3iSqJh0BvETAAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 437 }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[36m\u001b[1mMAPPO experiment completed\u001b[0m\n" - ] - } - ], + "id": "ui6mWeXFFr4M", + "outputId": "d7ecb0a6-c4fe-42cd-a9ae-f5fe371ac65d" + }, + "outputs": [], "source": [ + "# Run experiment for a total number of evaluations.\n", + "ep_returns = []\n", "start_time = time.time()\n", + "n_devices = len(jax.devices())\n", + "\n", "for _ in range(config[\"arch\"][\"num_evaluation\"]):\n", " # Train.\n", " learner_output = learn(learner_state)\n", @@ -1093,8 +1033,11 @@ " eval_keys = eval_keys.reshape(n_devices, -1)\n", "\n", " # Evaluate.\n", - " eval_metrics = evaluator(trained_params, eval_keys, {})\n", - " ep_returns = plot_performance(eval_metrics, ep_returns, start_time)\n", + " evaluator_output = evaluator(trained_params, eval_keys, {})\n", + " jax.block_until_ready(evaluator_output)\n", + "\n", + " mean_episode_return = jnp.mean(evaluator_output[\"episode_return\"])\n", + " ep_returns = plot_performance(mean_episode_return, ep_returns, start_time)\n", "\n", " # Update runner state to continue training.\n", " learner_state = learner_output.learner_state\n", @@ -1133,25 +1076,9 @@ "base_uri": "https://localhost:8080/" }, "id": "lMSKw2_q8YHW", - "outputId": "800c4d44-16c0-4aa5-cb7e-9b25bc08e483" + "outputId": "d9e07abc-0d14-42cc-f106-fe611ee5700f" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "MovieWriter ffmpeg unavailable; using Pillow instead.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[36m\u001b[1mEPISODE RETURN: 26.0\u001b[0m\n", - "\u001b[36m\u001b[1mEPISODE LENGTH:500\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "render_one_episode(config, trained_params)" ] @@ -1159,19 +1086,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "image/gif": 
"R0lGODlh9AH0AYUAAP7+/kc8igAAAACAgNDN3Ds7O+no6f6LANvZ5wB6esnZ20pAic/l5UE3f2tMb45bVP/Zq8t1JiwYAK5oPP/Mkf+3YP+TEf/CeKpdAP/ozTMpWlpEfKJkRaurq/+dJv+jNRsXEygiGygoKHtTY919GP+tSf+yVf/gvL5wMLvS0v/Snf/w3gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh+QQACgAAACwAAAAA9AH0AQAI/wABCBxIsKDBgwgTKlzIsKHDhwUFSJxIsaLFixgzatzIsaPHjyBDihxJsqRIiChTqlzJsqXLlzBjrpRogKUBmjZxqrwpoOZKnj536hTaM2fRn0NTAs0ps6nTp1CjSp1qUKJLqy2xzhRwlWtWryy1bu1KNivVs2jTql0LU6xKtynhQpQ7F+zYr2XD2n27Ny7bv4ADC6ZK92Fhh4cZJlbcF+XihY8VRk44GWHlg40Ha97MufPAy1UzGxbdEHRE0qVRM857V+9Vz7Bjy15rmmDtz6ol59bNmm/vuLspB7c8u7jx4y9vC1QOQDnz58MxRw/92/H0iMiza99u+bpt77jxiv93Pb61ed9muatfbxx69dHv4ZdHT77+TPb483N2P996fMTgLRdgcwPyF5Z+CCZIW4EM9uefg3X9l5qEiilo4YVQGXjeg/bRtyGHH0aYHoYklhiifB2C6OGKwFEIWYMjmijjjLxBiCKLKrZoI4Au1nggjUAGGd6OE6aY45EiGpnkiYgJ6SSNGuJ4o5RT6qhklUga9uSWJUZpJZU8ErmamC/2SBmXaFroZZZFXhmmm20yGedbadap35pLfgnmnHr2ySafjtkp6Hp4YmnooW/KOSaci9436KPZFZqon3lSiiiglf5ZIaScFicpppNaGqqmZZLpo6OdpupZUigthdRRSrH/CpGrRAUVK6y32tqqrA/RGquqwO5n0rDEFmvsscgmq+xHwTYr2LLQRivttNRWy5Gz2LLFq0M8KUDAt+CGK664CHQ77rnnKrBtQ0AhgO674Krbk7vwviuvrrPSxIAC/Pbr778A00Rvvemuy9BN2SaMFnQLBODwwxBHHPECCEjUsMQYS7wAdAhcnPHHAWwsQMcggyxyVwwMoPLKLLfscgISkVxyxifHqPDNTkE3M8gESLTzx9AR8HPGEgk9tMTQKeDy0kzDLIDRR0P8HM5U58xg1BH3LADWUjMINddFc/1w0kyXzbLTX2M9ddVsJ3e12AFoDXcAQc8dNtxkm2022nYX/9j234y+OLfceHvdd9pR561303eLvTbgkF/K2OA+F94V4kc3DjaDSi/O+NN9vxb56I3yRfnWln+F+dCaq82555+v/vPjpNdO3Venh6764brr1TnsLfOd+o+2Fz+kXrkPv5XsO7ee+OvAB+985n4bb73OcBPuuOGFMz+z4tGrLPz2oltfPPZia7/55bwrz9fv4Q8w/vo2mz86+lyr7zr73fe+Ffzhm9/+6mc/yOEPa/p7Hv+2572SgS+A02Nd9QpIugNGLYHUW+DmGgiyB0ZPgAokIAXbZsGjYVCCGnQdB4EGvfjJL4Kzm+AIDfi29FWOfLvrn/uAA8APwrB5Mpzh3/9KOLQTxjCFz1sh0VoYPxBmUIRCxBkRf2ZEICKRekrEmAeB50QUQjGKCpvizqr4Pe4x0H/vc6H4fljG8oGRbWKcGRkdaMYNopGHanwh6HYYqDeSsIb5uyH99JJFpO0Rh77LYxeP+EU/OstgC+EJ3BYgr77hq1cSUYDHXKfJwl2SW5lMgChHScpSmtJpndzeJ9k1IEemyVqwjKUsZ0nLi7iyarXMpS53yUuS3JJqkFSIuQh2rnJlkpgFw9Wu5oXMcd1rYM381r2MYgBoRvOZ0QzXNJHySykyaJNHo5jFxFYzvcgMayI7Z9TKuRV1hjNm4BwaO+nUzTACEoGCHCAh24f/yOXxc5Coqie24liyOXawjiq8o3UKGTE20tGNAm0WQXmWzxDuU4f95AtDu7bRsQUxosCa6McMysIrSrCjDqsbRgFKT5BK9J4XrOgTL3pGPtYFpXQ7JEv94tKXdiV5GQUOTh16UJPGcKgf7SmnRJoxki7RqEBEKlTLKNVGKrVOTMWYU7WI0CQq9Kb/1GdAr7pUmJpQpl6kqR1tOhqkVpV4ZC3rT7OHVkaqNaFsBZBbv6qluHYqqxLbqiGnSse3+nOlYm2pXx8F2KzV1Yo5rGlQFxpWi451sXZqLMQE29CuYpGveq3sTC+L2VeatYiPbWNk1zpZsCLWsootLVZPS8XU/z50tXhtbVtFm1bSynZLmn0YZ7tG2IMaVqO8tatvf+uk4DpsuB4tLguPK9TkQhauzOWSc+Nm26Li1qt5Tc1ew7up7KJpu9BNqWdPClrxWle12DVvc2k7xu6W9LufJe+LxqvbJslXu8rMF+q4Rkn7LnGVB8tkPCWYys0hOJIKLlyDXfdgYbbyv+rppYY3zGFeYhi4HQ6xiEe8rA8/KZgJGWY2CWBMAXhrxQTYZq2s2UxswljGuaIxMm28YhzvysTz7cqCdyZOAQx5ZvMEjjvlCU9ycuzIJUsnlE1GOyDPCL0G5qp0l0hdyr52tLG1somwPOCdelmyZnYtmhPLUzFfmf++cszyYPHLXv3qhr9p7qubZUTm9jIGz2w+M2vz7N89jxnOBZVzZ7fM1S6redCBnouh+YxoipY50rv9cm+Rq2nlhnnSCuqznYUDaNhWt9PXXS6o8yPq/roX1fA97JpN3cdVX6jVhH71rMF86l1vus22DnWlR6po4tL5qH7e73tvG99gs3rYTS12dI8d1WTfednebbaz2YNrTIcW1syWNaRpLeltJ6jb5M60rz3d63HzutbmfvZcbXjpdH973anmNL5j/el4cwfd7350bnP9Z2zfV9v+1g7Afy3ogXtb1+5meLkTzm1oa1Xa6mW0IR2t7oizG94U/7fFA4vxnGq8sxz/v7fH8w3skG9n4R8XOHhdXXBwZ1vVLp8NzFne8JkTXNk2PzjOcx4bFCNEkuSspCeNMuHnNZ16FU5xhLf3dAlG/egXJnpaSMz1rnsdJFp/+dfHTnayh13hAcakiwHGdrYzoFttj7u/jH6QdsE4xgK7u48FXE295/3GdDcIws6OHOgkII8MkMjhXei0riz5Z1J2MoMeT+QmE7jKhJeN4fMor8U3UaX7Dre+V87vlme+6AzyfPw6r8bGU5uq1ia1wZ9q1dM/K/WcV3zrQU960bfb4fYuje09hXs1sp7xvAd+wDuufInrefiw2bzxdY/89SJ71N0p9fILDf1VFd+Fx/+8//WrjX3paN/53O++sLqi+vCFH4Ljh335Q3P+mD9f/ZuRPvipL/6TczT22Td7WlZ7+Ld137d6/Ad//udRKQdxzWd/6VeAt8d+uScA7cdFyedzD1dzoXdz/SaB2nKA7peAPhR/hQWA5ieAc4ZwIKgW+oeAFrh7Jm
hcKEh/KrhoBNiCUvGCIxiD1beA6tWAHNh7Hmh6OrggFDh9Pth/r3eC83ca9cdzE3eEf8GD0fN+JQiEOSWEQNeBQveBVEgYIniFJIiBMzhdNQiFN2hsLBiGOziGwIOFZqiFRPWFv6eBwVdebuiCcAg7cgg7rndXeLh9DjiI6Cd8e4iEX3GBcViGgP+YgflFc11IhHYIcokohkm4f0uogE1Ig0/4HVFYepZ4iW94dXWneKeUiqbUearYiqIUeAXRLVN2UFUXQ6YoeFO3ObUIRLcYi1lHioxRdsI4jB0GjAtDjMiYjLlkjGcBiwShYtnUYi/WY844EHYHeMyEjb34jPoid974d9SYdqDEjJj4FbNIMxVjZJLneOeIMZF3eQySMjJIeUiGeeTYFDsnijIXiT+nGz1khlwoGff4hvMWSPVGiEP4gFI4Gv/4iDq1gZAxkFGRj77Xc/wIkf6oSHVIe20okYoiOHR1kIeYkIYIganRkJ6zSAsZgR7pNgWJTyJpkiR5kXn4Iii5OCr/qY/315IuiTshmYaguIbTlkgyGJBnwpMyQZFFaJF1JokZWZRA+RlImZQj51gxuZKFSJMIaZMa+ZA1KZBT2RZVuVklB4lN2Y/CcZN6k5MVOYVhGTi6AVRoGYBBx5H/05VGSRxvaSbdIZcYKXt1OYBEWX15iRl7yZfS4ZdfCZheaJdpBJWfKJWHCZfCoZhbeW2BuYJ3CZlOeZSTuSeT85ORKSCh2JYMiZdRuRyf+ZFxKZqdSZeNKZibSZip2RyrCZogSW+1uZGNtpsuhpqjaZu3SSqtqZvByZsb55tquTfIiYMdOZyn8ZIxdZU6qXIKWZ0nCZyvaZjQKTm5aZC+WZpL/1kXy1k2bDmeO9mdwiGdZ0WdpmmdJYmVjFGesbOb6gkg22iNGFdg7nlQ+SkQsihh7chV/wkA3eKKrpiLFMYU9xmMyvigECotDeqgEVqhFkosE/oiBQqN0SSNfieOrJSN4dh32khNOkZMPJZNe4dJGXoqfDGgE5OOMAoxSWYd9BhllodOTyZ5N0plSdWiAsKeqNWflbiPZ/mXsEmJjmmEQEogQlpbRLqkTHl922mDmemcQ9egSlmkzBef2DmTR7qYetmk0emTxlmlanilbChu1/meiEim3/Gk9RWlsjl6bYqe8KmVIxmRcBqnZgqex+mVeiqTk3inXMqSTbqlUmqkVP86lymopkOZpfepqHV6h4Mqn4XqpW6qh31KqZppp5qKp1kZppfpmX3qpH8Kk+EplBnXiWh4nKd6PFthmXuKmbH5qZZKqrVqqp06lsJVlmfIZasKqa36nJPqq88FrHQoqLuapIa6qOmZqMjKXXSKq1NKfmgalMRqcjnYnZ6KpaB6qV+aqeK6qXwaq9+6puGqq4Rqq0paqaMorXIaZ9UKrrnaqEj6qLdqr/EKpOkaqeuKr2Kqr+9qrW4Jp/9arGwaqoc6qgJbqmN6qgnLrYJYrqIKpg/brNyJriCaYPX6MPxpSUw3o12zi2W0oQrqdCTrUSgbqwJyoTAbsyHhsgQiszb/e7MYQbPVCKCZ5I1x93bHVKKvQqIjeqIEs6KgJKIqCo5L27EQprN96DmJp47wyI48urIhs6Pw2KMfU6MHS6ZW2IgfC7D3iq2OaqX7qq5M6q9RuzhKl624IZ4Ni7Fmm69VAbWZCIPDmrZke63yB7eq6bJh64fK6qrCGqhyC62IyrZ524N7W7D8yqh1O7B3K7htqzdve7ZpyrcKG7CTC7EbK7GXazaZa7ebC7lq67dOCLjCybGNS4Zj27ll+7eaq62cS7GSqp6D6zmlS7mn+6zwKrm0a7q2gbeLWIGPC7wG26UWO7fnKrqvK7bJy7CK67Cfq7GV67rHq4TT27zVS7fD/+u7xWu50Uu4sYu7nhu+oIu2qNu3Xyuv26uJ3cuumOquyhu50cq48au3iMuq6Du7q1u7kqm9esGI5ju/GduujNm+stuvLbq7blu4FUu/42q/1Bu8+fvAo1s2vbu+v3vBy5unFGyuYEnAW2HAvCvBC+u9GCzCCVy/vIqwG8w0HYy9H8zCIWy96mvD48uxKGuBCKqKNWyLIyugS/cqJktHSeyfDMqxOPvETwy1UDzFMgu1PzyN0cihNbazBsq01+TFW+y0Fqa0X0zGYdyy5GuO5CSj6/gVXEszObpOWqujI4O1XpvBGTqxZvnCFbzA95u676u/yOOaAkyaGOMAiJzIiP+8AcnZv9tqj/A7yGdayASCOA5wAJicyZgcAY3Mus35f7CaxpIMqJ7slRGgyaj8ACiHwNerwBHbq/OaaOebfCOAyra8yo58u5AsyLNKyMQbtxFDAraMyhwAyqWcuC38ptCbqtP5uA8wzLbMyEHIyjvsyqELy8zcnnu7ANBsyyjAgNQcwL8cuCZsOr4svsDsMBPQzbbsANOcywz8vw6cx9OaXvKsZrWMyhggARihAVsYzp7oycY7yqoKz6esyfycEf8Mz3/svng8oXpcR5esyfvczwt9zP67yxocy5aWvOvMzu4MzgwNwvi7uBudzUOavNzczROAyxj9yD/Ky+Y8yeP/XMkS88zQLM0i/dK6HNMnTdDNPNIBMAERUNRGHQGq7NKU/Mk7vdQD3cs0jc6GDNPLWpjZu8xArc1CPcIXS65c7bwljNVQTcpLzax8TMJ+TNKA/NBaWs8qnL7iLNU2TdXdCp1cjHQENsS8WMRUh7U5dcV+nbJQ18SiS8WGbaFSfNiKrYxWTE2OXSuPDdlDG9m5Qtk/JsqsuZ6U2R2IeTubLR2djR2YjZsuSpya/dmendmcbSoxDLYwgtp+Ctuxrdqgzdqrbay6+9q0XaayLaukfdq7zdu56626/du13dtBatvHHdyzDYb0HNrNbdq3zdy+LSqgMiqB/NPUndzbjarI/+3d3f0pnCrD0F3dmXLe3lna0r3cxn3N5K3cqW3dpSPfpfLd4v282GzfAXLf6o3e2J3ewO3cEF3c9B3gBW7g6x3fBz7dAt7W5c3d7a3gCS7cEU7hC+7erv3g4H3hEu7f1/3h8z3hPSzWFR7dHh7iJ17f4U3g8zzgGu4cLJ7i/Q3gDM7hol3ONm7iNM7eOW7eMo7gP/7K763f8G3hIu7jO97hSX7jJN7jEO7kGw7l/A3kSz7i+b3iRa7jVY7k/93lII7fQ47lRJ7lXP7lM+7l453hZP7kR87mbR7lbz7lQu7aP1znk33nkp3nlY3ney4Uib3YgC6Mfx7ohO51jY3Ed//noUKr58tk50NrtPWSomWMxjhuHXbMxlW75nAe5NLxxu4Yx+/k08+N0lDqmy8erL0ZypVeF7RqzUqO5ioO1wHt1KMNHK0OwzUe56jeybS+6qNx632c65z+6joc1x5s5WE+02Rd03LO46o768z+1Mpe0KV86lVtn7VuHcCO1sK+5W4uvMbOwwPc5Np+zsde5iju7XPd0xCV7LZu7uL+7eq+x62M6xgeyWNN7WVt7Yab6gKd7awO765u5MNO8Mz71d8L5mqe1SkdqPw+wWfd1WF95dMe1NWu6fRezfZ+1RT/7
lF97vIO62de7NAu17+IlBH98CuOzDmc5vhe8Vp98WP/DvH1Huz3LtMev+wmH+PzbtY1z+03r90w3/AyL+Y0r/E2z/HuXu4fH++b3vPXrurkHvBNP/BaLvJU7sI/L/GtLa0/PEl6fbKWzXdjr3ZLzEJnf2CE3auF3vZdN+huH/cbduhEgcUdqsU7dtd6L8ZSx/dYZ8Z53xP74rNtp/fSDhyXPk6ZPvMr7ukaIxHy+IN1PZwpj/E8r/VlRJ9LE4jDbdduPcuXb+ZZT/Jo35XtvvD5bvH7bvnLqvkvo9GjzvCl7vCs3++G5PrSI+ouTupzauq1f/RKbPqTf5uVz/glns5UJ/y4TdwcTWygr/LHP9XJL4On//I5r+/MHvrpjvnB/0/9w7+axW/0K983uH82sL/7st/7tG/8AEyLyt/5lP/5vs/+z1763r/8nt/80fb8vz/+Evb+AAFA4ECCBQkKMJhQ4UKGDR0+hBhR4kSKFS1exOhQAEKMGwN8BBlS5MgABDySRDlyY8aVHTlebAlTAIGUNUFuVDBA506ePX0miGnxZUaiRY0eRZpU6dKDQyuetJnSpICoNYM+dUrxqtasEzfSrIoSp0+yZYF2lYiW6Vq2bd2+bbvVK9WwJKfWJSk3rVqIeiP67TsTr0oBOcse3nmWJVzGjR0/hpwQ8EOogz/etXyTL+XNGjs3nOwZbOYAYxEjVtwx8mrWrV0L/cywsv9lzKRDg4698Lbs3Aq/kv5o+rTZ3bpfH0ee3HVx33RJ187MXHLv6SypG/wNXPjwn9KxKwcfXjxT7wVnD4ZuuXxT6+1djo5emDvx6+bH38eff6914CXP411voAAFGhCAArOzTb75uqvvIP0ehDDCjQy4yID/wlpAgQvDmrDCDi2yUAAKQfywohAVWEC7whJgsUUXX4TxrBFNbDBCG2+MayMdd+SxRx9/BDJIIYckskgjj0QySSWXZFJJHJ+EkrUmp6SySiuvxDJLLY+Msksv3yqRohMJILNMM888E4Ex0WQTTQ1F9BBOEuWkkU4xJ0SgTT3LfNOAPPfUs08PvyS0UKT/DhQgxcwWQGAjRS1bAFH3ZLIOgUcHi1QAS0nLdDFDPwWVK/6AS28wSV2aFDb41BNMRU9DhTXW5kZ9bsOqTqUU1ff6QzC+V2UFVlZE+ysVwBoJPNbAZBFd1dRWE/w1WGk/HZZUW6PCFbZUsWrW2G7rOnBacald1rnMigW33G1F3VXFbzlcdlx5v6y2VnNZXXeufPd699ZnfVVtXoGfrPfca23KFqt9//qX1X6xjXdgiSUsl9iDrVJX11xlehjhhp2NdmKRxSuYtotTSpjdjVXl9WNjQx45ZuRKRu9ksTJeWeF2E+wYY5hlBlrKiq29F2SNtT2a25Z7RjnioJ9ujWa8/9CFd+HArOaM6Zu1zstpqL9+TOq6qPYXa8/Mxo1rwtQWKVyw325M7LDIhhht3uzWzeV02Q7Jbbj/zpFWg4t+OWmVkeZ46ZZ/BrzxouSuim6P8Z7V8Ln41gzz4Lx2vPPHh7Z3ccv3G51hzUvTu+qAPWed8uksJjxd17HDmWV3RYepdd1z5gp23BEHXufEb3d19d2PD2zGOwVogOjmM2sgzIlCVH566SWiPk7No089Ku6rxz5Z5Bvfsnzzz0c/ffWHHL993NaHP37556efR/fvnw78iLKfU/+H+K+T//53vf0RECIAXJ4AHYJA6+HPgQIqFwIkOEEKVtCCFNqIBTW4wf9G1U54wROVnzg4wg4CQIQk3KDfHng/RF0KL4xyFKdSpq/SBWZTi8qgC+vSKeOtsH2Qi4rkfFZDzszOPN2D2OlU6MPxyQUCFIBiFClwgtcR7XcfxOLhlEY8aPWQiceTiwUOMEYyHsACVQxd8XhHwzXyS3FqFMoX3bcVE5TRjhWgne/gmEU2gvByb+xi7uTYRKdkwI6HzMAR9RhIP5KujaYDJMAEOUgwOuUDh7TjBxRpxT1q0ZN93CLPrqgVSiIvKCrA5CEh0JRFSrKRDDNiU5SIxMl5sZSdC4oYU1nGM0Kwlfgi4tmCmbZIAnOSt2RdTChwSAxIwEch6ACyfmm0R17/bZi8meUsGYdMuOXSjs78UQGkyUlG8tGRrwxMNkfpFW62LijLLGMzewQCEURTWdMsXDWLeM28qbOT7Gyn567igV2OUZOsJKcrzQlLfjbHn+UkZUBx6RQIFPQAVERoGiEKyk+eM5Tx0aYtJQq2rZRglybIY0KNic596lM0xaRmHEdKvqwYMpUrSKlGFdpRhrqUmFzcKUBn+jcnXsCoR73AKnM6uHV6lKfWHJ4o/5mWoQIOiDYRYtNiCcGtIuuhQaVqVbsJOqZOtacsFWZUQdrUv4h1rIIzWexU59O7NXQ6X13pU9z6NgMOkHnOI8334qTAhjAwfHayHi1Twr3t9XWB/+LbK6zqN1nKVtayS4rs1y67Wc521rKZhZpjCytahhj2gKRdiGlPi9jDEra0qFWIav3q2tRCFrSGQhQDdLtb3vbWt8jybXCFy4AZOpWj+johCi24kuQql4JLvK3IEJWABfmEuAKgbnUTU9yzLtSGOsRQDmXIuehObLra5cmbsove1NA1b11VFl5jqtfyxuy86NWJevE7gPaiFTfw7ZXD2NqX+tq3XOtFr37x21/vttS/2IRpPmVaYPMeeL8DUDB7uQvVB/czwrITKYXFdV/8Zli7DH6qgxv8UqDmNaIiHhiJE7wRBFcXxcc1bo4h2eL5vhjG85Kxdk1s4w2rOMUslv/qRsP6YyBbeL9DXtCNdczhFf80yWBtK5ObbJ0aVxfK85Fyd4/8352t1awE1rK8guxlGu83zFQec13VKuAzUybNanZyidu84CKntcMO/fBcJ3xnaa15QV/mzpuNjGMx/5HHEqYvoQud5xlj1819JrN7Af1oEB9T0sKitJD3rGEPxvm9ZaazkrP8aWAZej6IHo6i/VxlOdvuyi4WKqtj5WruwPo0ss70n6uDameF1NO6BhWvh+Nr1GC61rT2MKcFHWlkh0rZp2H2YYD9bFNXbs7FHrCdqy3ZULPZ0nwuNaPh7Ohb9zjX48YtawuI3RjVG0YpoLG99c0i2CZEto+V92r/aRvbAOOFscARLIngbW3PNtzhD9/SwpMNcYpX3OJGkji5Bu7vfhvk368N+GwHO/L+kTyAg8p4oRDlXA1iUFMsX266p7zo/TQX5syFeQWhm3IotZBTHQRvVXio6WETnXY3hJR4cUhenhOMrHEN96y7XXRbm1nVaG56l65ak6zeDMAy33G7Ie3jrDsdrjWTa9nsSruvy3fs7y77jbYuFZt17ettD7Tajx13ip19anUnzN3XfkS3d3rQfLfR3FHSdbsPnj2Oh2Dhp012xENI8XYBfNvAvu6ZI9nqWMZ65fvuEnwaHtreFjbtJK/3w4teP5cfCeMDD3lk4V3arKe261///3S0Rz3Yp6f6R1MNenHr/kGwF4nsNS94oxM+73ULsfHBg/yQKL9vm6d5o914e+jvXfrhoT5IrK8Z5qfe+dyvpfe/r5zwXybz1y8/
8FX//PS3fv3T5/3f09598z+++bKkvyFSv/s7jvbzj/2rP/k7ItsTO9OjPAJMDgMcv82Jv6mbP/QTQPuDwALMv7F5P/KjPWVhwM/DtSXbQPbrwLn5QAoMQWeLtgacPLg7wdfouIIIkecZHBwcjIQ7uZLzwTppLMVCCR68kxnEv4tDwiREQiNEQSV0wif0LCaMwJADuI3zuBociI8DOSu0QSzMQi8UCC2sLSocLTJ8LSmcmQjKuf8Jcrk1fC7skzp1KyKbYzmcc8MSij40jJtyCbqogKFEGa8KlEPPQDpMUbqkYzo9DJsUjJwVRB1B7DwrI0F3M0FFjAwJdEQXRD0FBMCR4IAJAMVQnAAHWBvfkw1LjBpGDKJMhMPfs8DzC4kHSCUSKMU60whUFBq/80AEzEBO5KoWbJgFKKgH0Dxj00BcBBNVxCpWhETtC7uPmACLupSCe7tKREbGwERe1CpgHEEHsKgDmIDrM8bcu0a4yEZTPLX/+0W1ioBvPABS3JxxfMByDBzSUylKdEbOy8esAQlZdMcIyBx09A16dIxztEVXHESE3L4AaMd4AicekQANiEeBlAz/gtxDXVRBbfQ6bgTG0fDGMnpIH5nIgzxFizRHZeQ6ZuRIdfSqkGjIMZInH5FI1JFHGTTJpTDIq8s+feRJfgSJDfhGDhBHivyOm3SLnCS+OIxEbmO3j+AAiypGorQPo6xHmSi9GFzKdOy/yBuJYYxKkjQOqmQLpCzBfVRKs5TEj/DHQwLIr9TJWxTLtSBLfOzJs6zLtFTLB9DLvXwAeBxKsBzIuCQPlKQ7jWw8lhTBjgzAbRxAwQTGq8S9V/S/rWxJDGTMY3TMx7zHakRLpszKTYPByLTJzHQJLiSIGwQs6AFDE1pNMSQ4MxzDIAxC0/xC0kwKKMTN3KQs2zwU3fTN/99MH948itYkTtjkOOM8TtoMw+JUTtZEzit8zi6MztO0LeFcuTtswzvEQ8TUxOqgQ+eyQzfcOeH0RWTpQ5v4w/OsiaGjzMTkTk1Rz5TIlEJ8ofEkz4TkDcjkv/J0z/akxr2RSge5z6/TzwSUzHX0TyHcmgAVkAEl0M10QPzUSv78T3ipSWt00M9Eo7J6S4XszAnlitXbz3nM0J3kjALtxQOtPcW0zI1szBIF0blA0cuk0FaEsBY9TMyE0Rjdixl10QQdweEry9Db0bvMTwjFyg/dRBWNr8X8UXIsUg8NDB/N0RoNUnADzIqMUiXdUKjLUrZbyaoTUrosvi21yxNFUv/R1FAwfU8RNdDRNNMFtEedGlITPdM7xUtvYVACiVM7xQ0qnb33vFI9/dKp7FPPZBhAXb4wrVE3TVE4PdT+xApFhT9GZdIK9ZcLXbVIDb7eSdMRldAlDdW7ctIqJVFOnUvONFIe5dILDE1QxVBOXVGr/NQ3XVM5bdNSDVQdPdRUjdBbncxG1dVFfdE+LU4dRDtkrQsiTKzmdM3k1B6Ew1SbYNbwkdXmAM5s1VYrudbp2NZvBVck6VbacVbm/MEEMjl0PddmTdcGGtdgvdRj6U42BVJLhdRetVFW9VNEXVVRBVYEpdB3hddRpdd4bcZ+7VSCNVSBnVdcNacLqICIldj/ClCqgf3XWa1X7hRYgDXYNaooTOoli21Vh7XSFtxYjC1ZPyKoVMIjkUXYgu1YjT3ZhnVZqYOnXUokjlXYmsVTft3XsNxYmtVZMdOlXToolI3ZjPXPk5XUpNWiCnDHihVapN3ZoR1ZAQ3afPVXDrOpbwzZqW3aqqXaizUQpgXbAQmKC2CmkNQRaApbsj1brdXSrD3YWSvaMWLbHRGnuK3bngVahpVbmPWom4XJvKWnaOJbe4Vbsw1ckuWold2llh3bq7Xal3VcsS3bmW1cnuW2j02lkJ1cy+Vcn/XbrSXScU1cPzKpVKKAy4Xbza3cnw1MulXcdetaO/IAwX3dvpXS/zJ919Q1pwyAgOElXgjI2dHVV9lN2N3NQ3zl3eQt3eWl3NBVXt3d1N+FXert3ei1XtGNXe513etF3ex9W+/V3u1FX9JNX+gFDcYt1+n8Qvh1zvel33ZtLfudt3Jl3HDl3/79kf313wD2X/fFX4Fb18MqgARW4AVm4AZO4I1w4AiOYJwoYJE74AJi3Oc1Xc6Q4A5eYAj2YA/+ito138ylXUF9zxAWYQFQ4Q4eYRSW2RNW2nhtYQkG4Rpu4Bee4UvNYBIG36bAYQe+4SBWYB1O2aXVXA2W3sAgYgYe4iY2Yqfl4ST24fVtjib+YBbG4geeiSq24rkFXCXuXg7e4gJ4YuoijmLMPdsehuEaLWMz1uItTmPmjeEw9mL25Y03PuMgnuPpLd8Srk7eBF4aLuM9xuE+BmTyDWTbHGQ1jmMsNuQaRuTqDV9AZuMdfl09fmQo7uI2Dlgq9mRH1mRNJoA7xuO/xV4xrmQyluNNRuNOxuTFBeVYTuRRLmRYPuJPluFcFuVbJmVT3mC43GUpzmRfvuVSDmU1vmReLuZW/uVkluVhdmRgtOVWRmZatuRZZmY/juQW7mYVnuQf/t4vLkpppmM3NmZrBuYl9t3xrd8LtuA6qWZIxmV2hed4LkLNFeB9DlcA5ud/Bs4HCggAIfkEAAoAAAAsggAbANsAugGF/v7+RzyKAAAAAICA0M3cOzs76ejp/osA29nnAHp6ydnbSkCIz+XlQTd/jVpVaktvy3UmsGk6LBgApWVC/9ir/7hkWUR8ql0AMChd/5QTelJkm2BK/+nQ23wZq6ur/6Q3/8J5GxkXKCQfKCgou9LS/5wk/6tG/7JW/8aC/9CY/+G+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLBgQQECFgRYyLChQ4cLECBU+LDiwwUIDWrcKBAhAooWQwbAKOCjSJEkOaokiJDBgJcwY8qcmcAjyJMXV3JEiFMkAZ49LWbUqRIhgaBCBRxF+nAo0Y0IFcycSrWmUqYPnxoEipXhTwFdGzrVevBq2IVGz6IVQFZjVKpwY1pderbtQK5hv6oNMNYugLRqAZ/ta/dt3Lhz9/r9C1at3sBsF3c0O5hyWMJtDR+uKjjsYrxdH1eWPJnuZctdMZPVvJlm566fG58VfZo0Y9OpUWNVrZV1a7mvscbeSzu37eBMkSPl/dT375eJ1Q53DHp3ZMnKg2bvyZyo8+fR6/r/rc60uPXjupOnX349ttTnMsN7Hi87L3n26HFb168e/Xv4MMkHG33E3adde/Txx56CB/oHIHDrBTXdbAZyh2BhEVrIoIUOPgjddjhNaF99xpEG4kknitSdTt/9JqBwBFJH4nkmZohTiiGtuFKLrb3IlIihVXjjhZnZiKKRKhK5mgD/PegjUkBiZV5/NW44pJVHdujhkxLGSOGMVGKHZI5jJqWlkzhaFGV5QmZZ5V5pVqRjUUx6+GGZFa2J1JT4vQkZlkmeCSCXPekZFJ8N+lkZoGQKCh+hIdplQJshLaAApWQaINmkTN5knQKeqqfpYpwqkMCpqKa
q6qpWgQoZQrDG/yrrrLTWauutuOaq66689urrr8AGK+ywxBZr7LHIJqvsnAWVSsCz0EYrrbQIODvttdNeKsCofnFqAALYhguttt+KKy65myJUrrnYkgsuu9cqEFuoQUU00Vkp1WhSWCTt21W+YvqLVb/09rSAoT0hyqGipzFqJsO5OZynlyPCqWRveDaVsUPMQrWxWB8zhDBOCg+Zn8USa3zyn4pRHCSYfYqZMschr7Xyoi1jWCDMicqMssU3N5xzkTsDDfF+RvvMsnQuS4npw0rjDFnQEQ+9ZNFTH63ezCBTjTTTOsuYdGxcMxSnylovaDXGWI+W9oFl2/y2hms3x3PCT8t5sd1x8/9Vs99eb123d3eTnDfaUQuddeJVg0202IuT/XPkCU4uXthfjl350m4z/vXlj2dOOYZ9n01z4Go7fjXknUvOeW1zXzk4i4WfVLKbngs+epGl/z2y7YefHvuRvaMO9+w71u5T8F0Pn2TxzpPZ9+/LK99o9ElBn3vqoGvFqVqWMm/2tul2CpmrlXEraVQFW4j+acvGL//89Ndv//3456///rd2GxW816rW/wAoLXSRSl3vIuC4EKjAaBnQf9tKYAPd1cBxzQtfEkkIvjrmlpK07yQE2+DeCCcwpoSQX9QLye0ChT29ac91Uuse21gHu+0db3dL6t30msYm60ENhopr3eb/YjgfzFUMhxjToeZIZ7kihu6IQmTi60pkQ7qpboaiiyLvmkhFIDZOhnZrWw29+LkxDjGITlxdFs0oRSJ28YxfTCMWocjGLU6RRlWU3RXDSMM3thGNfrSjG2FkxJctUZCAxCMZdQdGwokxkDnkoiLhWMYBFdJpPtSb8ayoxUje8Uc83JP45JZH4h3Sk4ME5SV7eMokSjJMi+SeHPm4Rki68pMxi+UNG0m7R07yj3G0Jd9eCaVQHmqUgGuhxl5ISUbO0pF9/CUigylNVCZSlU80JBKHicue6ZKTz1zJ9/ClLYupry2l+uCR3pebc5IlnedTZ5L4R8962vOe+MynPvc5/ywIKuCfAA2oQAfKgFIN9KAIVVf51lVBClaQAA9cXwQfClEGPlRe9EmAnQbAAIRodEsc3IoHRVhCpAAsNiWtl03wFZuPekhbLh1USMuixG0Srqa8TF5MAQRTO1llk3rs5C1TWUwM7RQ+PQXpCGmH0zpak5rYXNJRn5NUNC01eU0V5k2J2SWjbrSqMr0qnbJazaFes6hFmupvwPqombLkb6ZrXimfhzw6qbU1bAWPW+8C174C1ZR7JNxdN5NXF+21NFxd2FylV9edCGCwhylsjw57m8SaTJk026FX7STZzfwUsyBjJjArSci0ftWjPqVsXMcn2mmSNqoYg2xcOnuYz/8uNnut5CZRu2pazqJWqX+lq02ZatmTtPS0j02tWB1LVlg2U5aW7O1Lf2vV4DJ2uFgtrkiO69vkAhe0rM3tVrtZqIwiV7acWa7HmpvL5+4ynDo9r3Kti1vsjlW7IeHudL1bXfCupbVPfS1apSrf797WheIlLnkjhU6PsurBq4IphCd8KoUe0HyVYad13Ok99sXzVfwMsYhHTOISm9jE/qSoAJlE0Yg2eKIXtWhDLQxBAzAAoTgOqIwneEF+ZVCeFjkpfVJqsJXyS7UCcIlyiYyTgxkTb5lE3DeD6lSMNSmsms0mJhOc3QXjzj0bhZRxn2y4KAvvwMvkMp2u3FbfkRn/eGaW65QBK1S7sVmvbl6lKOM8Pvoi2L6OvbNh86xlVgJ6vfj9IX0EPVlCq5GOWlXwbhULZuVm+dHarPN4J31ZMTHas6tdSAotssLroTmzag50mEMdgFFXpNSKHq0zI528T9eW1a5+CKw16V+/AdjKq3b0HDNdZd2e1ZuLDvalh73lQ3eQvcjGkK0Rg+s3Vy/ViPYyCz2t7MZ6zJfOlTV0y2rnbge2l9EMt2tnTW7CTRsuYt6utVWITCRDm9LJtrS3Owju9or7vbRes7lzSqd+R3vd41b3kt6d3mXTEtLtlvSx8S3tgcOXI+PkVzkhw+GnwDPDQE5Kx4lSKgpT2MOV/zmxylfO8pa7/OW2SvFDV6yAFtNYogyd4I4V6OJ37pyADm1oj//1YxGih8kgNPK/kIx0lCh9YLl2yK6l7F5wBpy5iZ6Yno/JZ1LOWbialjhUB8xsQ4e9y5z+ctWpHF1MN/vs99W2qb9+XYI71uAURzjAI472iTO40HvG9rOzTvV/W720bjd7sTftd7Ubnu2IL3vgnS3Se3ea7vW1+7fT7W+9H17hZh07bxM/ebhjXe6x9jzkYftwYl8922nfNub/rHl+c/7gAWY36I0t+vJuHcqCrzzhzzz7NJ87eXi//NrpvHixC3j0kud68Gk6fDkvH+wXv/vt8577hHe++3tnPf80a8n3uMd+7teve/Y3T/7dM773yn8889sefeBTnvqo5/WpQ7tvkSbf8ao3f+V3euenJhIFPhuXPuWjYerBgOwxcjrxcafhgAcCcxZ4gRiYgRoYP/6UYzlWUAM0Y+RzYTnHcz8HQD3XYTAmgiUIdPr1IB2lQUemXh3UdJXydCbEdCF3EThoUi/IU/VGg8KXf4UXgNhHf3aDXlSRgM3Xd/AHgOD3eWSXhMg1fW9leVAYes/ne9L1IEz4eoNHhMSXfpm3fh2khFPxhQMIe40ne2RIe2YoUmg4E2rofs6ne9+nhXgIfVTYXVbIV1johvJ3hJHXh/v1h4glhtY3iOqHhIL/VYX3d4XV12e9Vm2bdYiRCIiT6HVvaHy1J4eQaHps+ISCaISNWIiP6IeZmIgFqH/Fh2rHZ1eh2ITm14box4hl6Ii0M4cyUYd5yHtbGH+mmIuouIuzCIZD2IpFGIWrN4WpiImiGIbKOIa4CIe6GF+qGI3JaIupx4wCKH7GmI20SIDc6IqdCIufWBa8GBO+iHt66H3uCIx7yIUEJo7IiH/TuIjDaI3FqBKcYnIT1o4WAoHihHITuIMaQ5D+aJC5QYEWsoEQGZESOZEbKHMVRHM2N4I1JkEmuII6p5E4x5FAd4LwglEYgpANYS8yuHRCWBY2GGQ9qFIt+VYvWREn9C9R/9cQU0eN++iJ4ziKwThmv1dmiFhZn/QASJmUSGkBy/iO4eeM6NZ+v/h+pPUAB3CVWHmVENCU8giP9Fh/RLmKRjlIEJCVZqkBPOmNhAiOyLd9wqiWjegAZjmXaemUUsiH4wdxduiElTSXc7kB+giXxMiWBeeWWdiV77UBfjmXTMmJ1eiT11iYUhmPVMlIFrCYcxkBlLh/4RWL2jeZ3GeXQRUBmDmXD+CYPYmOcVgW/1eKgkl7cpmVFyABtoIBycSZ/9V/rGmYrimazFeWWEmbt3Kbr8h/nsl+ejmVdyhLVomVs1mbxHmOxpmOb9Wat5ia07kQpFmap7mZxdmZ1MlX1v/Zjb55hJeJmZoZmOV5ioT5mclJmcu5d4q5mJ5ib5vYavNGakHoZ5C5EBDwnwD6nw7AlZXplX9HetInlqy2oDOpiYooMvn5avtZiX5FoQ6Xl663htJYjgQan3f5la33dj
+5oaR4na/Zn+2pERn3LwI5JAqJcQz5KSgJMi+6ERLYkDM6PhS5ozzaoz56TzW2UCQopENapEFqpJISG+hRI0vKpE4qJk1qGxgSpfRBpVX6pEqKpVKKMVZaJF3qpVo6pWG6pbTzpVw6pktipmcKpWSapmhqN2oKp28qp2zapnSapXiap1dap3oqpnZKOHFapnMKqIMqqHz6p3QSqMmjqIv/WqiNeqiI6jGM6liTSqmOmqiX+qdIlqmSyqkdVKmdGqmYCqluSqql2qdgaqqiylegKlKt6qqeCququqqM8apvZau3Gqtlgau8Ohm6mquzSqjBaqi0KquouqbHiqx7mqx3WqzAyqzEuqzS6qfQGq3O6qu/iq3D+qjVyq3Teq2buq2j2q3j+q2neq3aSq6hqq7raq7KSq3oWqvZKq/iaqn12q7wiq7hyq6fOq/0yq/Giq7eQqQ4R7AFi6QvVqMqenMv9qMO+7AQG7HCYpENhJExBpIJa7AZS4IiiYIkyS4mWSQ5OhJFN4O9+q/uajc1yYMjhUIRqmsTeq/9KrPbWKIG/ziUcFaU+wqw+MihDpGTXhGzPPusKVugT4mXUfmeodms+Zqq35mbx2l7oPmW7+q0ReuhzYi0bTm1hymsQ8uqFqqb1cmbJlq159q0J6qakemeGbqX5Yq2ZruegwmVW6u0VMu0Vgu3iPmhBwqWOaugDZquV8uX89i3Iap490i0euu10gmeqzm2XNubeHu2eYudjru2yNm2yumti2utlgu14TkZ42mOncu5lZu22Ym5Umu3Xeu5lPu6csuPKbq6mgufpgu7kxu7KEq3ksm6ksu4pfu2n+trYiueZEueuZu8wDu8lgh4CaqNu+qv9vmgovayUie0gyu8uGu0fCuUPtd14f/TdX6jsAYxsAj7veTbLDHagCPrNxL7vvAbvxJLsQpksSyoseiLvyrYgh67LTfmgTg2dANTsix5sjs7ZCNLEkq2JUC7EDupnsrrunG7t0PCcFORAA0cAA/sndlrr1/LihMYZhm8wai5vRIcwYTLHhZMEyOMvcHrwR0MlA0owtarky58uhO8vKg7PiscHy0svgf8wvjKvHXiUz+ss4GLskJMop9Cwzh7bYBrwEnMoD0sF0ccxdI7xX1VxQFyxdCruDiMwrXYkE7svPb3xWBLs9GLm37DxdDhxSMasEssx0TsxgOAwTUctECsxVLMnzRjx3j8xPS2x32cxb0DyHCcuGn//MFKXMdljKBnHMdrzMjTm2GP7LdQjMaCO8eT3Lhrgch57MA3bMK3m8NYu06XfLilJ8lgTMraq7t/nMoYKqKKvMlhrMOwDDKg/GIIGITpSxDme6TCLFEOWcEACWHym8zKvMwQSb8EZL8f+csDEcwHO8wJ27El+bHmErJLksAEPDBBfMsnTEIJHJMGk8gaSseuDMOPqbb92Lu1u7S4LMbsTMQXmrTxfLfzvM/jfMprybtsS8vp3Mmc3Mq5fLnvHNCIO9AGTc9DvMOg+7jGG7ll69AzS8l9dc91m8+tW8oWrc4HHdGq63/HS7ri7NH8nMJHC6KzvNBuW8/r/NAhTbxR/0vSFI28Kf3KH92zNqt1ZhyWmtzIMX3RnkzTocsYo9uhKN3PS63S3StvgqyfhJzFUhyI+RXKGjzKptzUOs29WcvS+CzQLy3TO93QXv2NAJ25Yr25Xc3Ube3UX224Lb3KtSzUW/3WYxyUUP3TfxvU4TzUNavXVx3VEjrVamzWbk2OPZ0VEtUAMuLYYdEADJu/57u/5RM3kl0zmc3MnN3Znj1i1kzZ1TzaGxvall3ZHodSCLDarN3aru3amuIRrz3btP3Xdw3TXPottL3brJ0Rus3buy3AJvTNOcjHVK0v5dyyOInVJBydBb3IMczEheu9iacBDnDd2O0AaHm9ho3RVf99n1HSnH7pKc1t22UN3e2cugmt1sDpl+mpx0hcyGzcvJimAaV5AN0pyt0d3SB93iC82D/rMh1w31sJ31h82Ohtz8UrugwRm/c9oPod38c938JGSwtw31lp4H5t3Aj+34J9s6Ezn7IpnLNim+XN4d4dtjW9mwFwnllJ4rSS1fv93LYM2Dz94T79aBNgls9JKyau1f5t10HOoBoNzxjeATY84yeN17hNwWittfDs4IuZ3zIu4R0u5Gf9z1Ae0O2dmdxt5SlO4UUe0OLNmF9+4GH+tEYt0QzuEA4wAXAe5xMA4UkO5vxN0Aq+4pDruxWd003u5zJsoNSNyYNs5zSO5f7/zJ5pTbtrbbtMTtaALt2CvtcqCNkjYulYkdnSLBDUXNqk/b2YHWqb/dmkXuqmfj+m7XGTreoYK9qfftqpTnKrntr0wQC2fuu4nuu6Phm63uu+HoPy7d2/Ddyz7dvEHtzmZScxuI53Eux3TpPJvbIQ8YNIRV1h5eyHTuQLzhjM/hK0RW0o/uwJDtFrPtLqWGD9deXmneWKvuUe0+0D8O3wtu6JDemJPrfufobofu0TjtFWDeL1uF/dbltpLu4ePt2UHlv73mbYvuSKjeOMfYleaO0M3+8GP5Y+2xDUTlUUj2cNb+OSSL34KfFAyF/8ru7hnt4IPbug2F0DT+9c/ef3/y67i97yAj9fFp/tGb3tHc/xJl/xKP/dIr/xa9Xzjfbxtx3oKy3X4XjzBlbwOv/vOa7wLo/zQW/I4J3sTp/uUO/wSv/Ug92FJf/yKe/1RK3yIr3e+l71T3/xMJ/XCB/2KnjMD0YCDkb3qjLrst7qsP7qrB7qmn3qgj/4hF8ssR6Bel+QfM/qmw4Ane7qnh7576Tax/7asV0SlV/sZQ/y457bmf/axv75rS3cJkXcJvX2Mh/zHiPtKWnOTYbOY332Zt/f7I7vYL3RjS7P9S77nH/wky73hC7Vhj77eE7u9B38hT38vY/ocP3ktw/Pua/Pu0/703/jcQ/wyA+zSr78qP//9XE96Kr8vKzc+UOuxVIf8XydyeNf40nP+zN9/OEfyXXd/e7v5Fr+/ApN1wxN/pFO/c1//wARQOBAggIBHESYUKFCAQIKPoRIkEDDiBULNlyYUeNBjBs9Iuz40WNIkRobErCYMsBJlRZLZqTYMuJEhzIhknzJUEBOkzt5LsT5EwBLmw+JFiUolGNNpANpNiUY9KdUnlRzWn15FKrWpkqHMoX6FKpArCXLijz7Me1IASjHrmz7NoDXmGPFjl27MW9PryB99uWKNHBRumCb3t36t3DfpYy/Oh5sM7LMwnIRN90LU7HSzEA3C53cMrTKym8vC/48NXXV1VdbZ437dnT/ytJ26yaG/NqsbrS81cbGCxyqUgMNG5g2PrZBQwN9iwto7vV5dOLMnc+uuFw4Uu0NvX8HH178ePLlzZ9Hn179evbt3b+HH1/+fPr17d/H3xnA9OvQpVv/z78AqROKvwH7I/CnBF1DoEEHH4QwwuYairBCCxHQL6EM/cqtQwMuBLFBjD4M8cLCFhhrAQwFQBGqBTZsjDEYH5NRAARabOpFG3FEaoHawroNM9/Y6rDG3NwKDsnhODMMqdOKmjHKIfWasiclB5ogAi23j
OABo7ZD6sfDgkStSMCq1OzIhxw4oE032+zgyyvDZNIyMqFE0zMzF6vxygXeBPQABy4Ck7A6/5FrEs89mVwUtO0iCBRQHrGrSEwn75QsT50aVU3NgR6IFNAIoirUJkuLejJTTllb1TUlIQgVUC/JKpWyQ21LVFUjdz3T0wDYjPVNCAaiNKJTbUpVJilbhc1XWN+8QIILIrWA1jkNddTOXJXVVMNuOeS1MCRBfVMCASQIdFRr5TpWpmRF+zbGXsNl8tU3vQu02nXfareld1Vall5HfQ3AgnsbAnQCUq819VYgt4WX2d0k7u3KCdzE901CGbY1W0TlCnhekcV9CGOE3Rx0YXYdHhNigOOlcWRG+1yzzYwPGHbjlT3GFWSYQ+bzzGsdcMA7oh2YVWV+Wb7U5ZSAnllmR/859s6iYiHqV6V/n/65a4p/oxrTfcfKOqWtrfZa4E5pjggDATBAm+OWqhPgOFztbkq7BXMykO69X+q7wKsL0k7ulLrLL3HFF2e8cccfhzxyyScnz28ED8Tc8swFF1Bz4gor8cIJbQzdQqgHVptVEkuHcETWIzwxxRV5LErHr4lMnUHabdLxxhTLtujsik5fW2rVDOcaeZeYRlXsiHNvFvrdlB++1rmZR9b5l2+nknsrfabeWOzd1Z5r79OUvmLwd1ZNW5/P1zN9sNdfmueH35cfd+NZC/8m60kbn7/Khzb4bSp/VOqfnNjHGvfJJm37cw3BhJTAggCvIsKLCPFYdcD/7zmQgkkJoNYGWL0CequEfvkgsf5HmxCabYQZfGDQZFgv+pGthcF7of9OKK8ZTq2GS7Jfy/AHwegRcXo/7MoNL5jDL+0wZj08HhLpFMSmDRGKGzSi+jy4QNc0MDhO1KCrpIit9n3MgWCMIQ23WL8y9uyMHERfFue3RhtSsXlOIyAc4ydHtqRwbEDkyXPw1rJBFkVvl9ucggBENz8W7i2Io1wkJTlJSlbSkpfE5Hs8x7m/iSRwiuwcJxG5SVB20pNXjCAfu6dHA6qSL46JGionxkoT0hJcroTlLWXZGzT20pa/fCLq9geCChTTmBWgwB53yUtgLvM3wJQKBUKVAWXG6NKawsxlKnHZSihmIFYV4OY1i+fMbNZym+bsIQiCdQAOoFOcWCRnOXn4ziL20JvBMoE7sUnPesrzmb+E2gnW2aZkznOfBx2nP/UXzzjKkAMDbVMJdMnPWZ4TlmGsqAwrEKhoiUcEHggmQuFJUYUaNKEnTeg93WSu8RQgpCgdqUhLqk+Y9nOfKABUR8ETghGAFKMZJWlJf/pPqak0VOCcqExtWtOZmjSmT42pNGNFTZpCVZsMLedQFyrOD8QqBeFUKlDDOlOtrtJ4D43UB6o5VqJiNZtlfeX+VEAButaVAu0EK1PFqleRBAQAIfkEAAoAAAAsnADMAKUACQGF/v7+RzyKAAAAAICA0M3cOzs76ejp/osA29nnAHp6ydnbSkCIz+XljVtVQTd/aktwzHUlsGk6LBgApWVC/9quWUR8/7diql0AMChcelJkm2BK/5cY/8F3/+zW23wZKCYkq6ur/6Q2/65N/9KdGxoYu9LS/58r/+C7//DeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAwgcSLDgQAICBBhcyDBAQgAQI0qcSBFiQgINMw68uPCBx48eKxh8WLFkRY4aU6pEqFAlQ5ImY1oUgNHlQpQEHxzYyXMnhJECZArFabPowYRGC8IUWpJoUqcBIPScmkFpUKYmoSZdiXSrwKVYJ2q16bTB1LNWwzal6dUoy7YOr6oVy7at07NnNRAEOxfAWLgL39qV23dmTa9ENeA9K/Ir4cJ/ARcUjPhx38gpcVZYfDbCRstzMUsWSHkr39B1Kx+OwPnsA8eFJYoeXfopaLWzG+I02/OChITAg2OIGztibsC1jZ7Gndp0c6k8fwefrnB52ONwkxe1jhU70MMBdPL/9E0d+HDuTL171U729vXmtsEHYN369efihkenZO8S/VD4ysG3GWee7eVedwDqN1lXleHnV4LtyReAYostkBZ+6m3Fn0r+yZThZxJGBcGIJELQAFAOfugWg86lCGF/L3J4YHoxKrhhZjP+F2KEcHUYk4pF3aiRj1nVOKSRGRG51o4KksaibS4yKaOUOEbZJEMLKPBkgAYUZ0BCClhYWZiDdRnblwKQGWB5bLbp5ptwxinnnHTWaeedeOap55589slnX2iaCWhCgg4qQKFzBeoloYse6uB7GObooaQ/Ulqki49CWpySJ1m6FqaRZopgqKRu6mmnoIo6aamxcUqRq3Sl/6rqpabKCtmpr+Ia66yVsnqrrZfpKpuwv9bqa7DAosZrr8Y22yqxMyW7bLTOFlstssdOS+2z0mp6rbLa5potc92OOu60sBoH7YPl0tjurOluy+255n6rbbzs0uuuvv+FK6693s4LcL3+yovtwPsi3G/B6vK7qsPMKqwqvvjmK3HEAjNM8boVd8zxuuReTKvIn0Kc6cbvPkwyqgwDoGhhL8PM6Jkzy+wozTej5ufOPPfs889ABy300HsSYPTRSCdtNAJoKqD0009rmbOhiKoVM9WNGoAA1FxTadACCCQkZlILeJwyxrciMPaVBAmZ5Mdnj/ys17RtuR3cJpdsKt2Suf+tG94r/7s324HZ3V7cemd8K9/IGQ4j4iwHThfj2TkuI+SCKx4s5W35/RLgmoM7OOELttRg3pGHzhzn61leJeqZWzs36aX3CLrsuG9Oe9uuH4n5rpIPy7qGvb/9+7DHqzt8Up7fdPvBqr+3/Iqmtwg78NEjOH2Qxf+dvMHQL777UdVDeT3y5ys/vpPlB/i9xdnTuL1NzaOYPviiz75+/RcG3/D9D5qfS/hnoPeZLVICTEmWuve5qoXlaomqGdZwpqa7Ee2CGMygBjfIwQ7CqWtPYxqYQMg1qTkQKxC0mgQjODUWao2ESjMK2MTmlbI9L3+5Q43a9sdA5xnwhqtbXwD/CHgfAB5wdOMjImyMCETpCVGJxGHiDxO4nx7az3/4CxkSdwfFI8ZvYVukXRebSLAcBpGH7bvbD6f4RCv274sqg2OlqKiRMa6RiXTMiB2liMc2pvFwfMRiAP1ouzsKEkj0c2MBAynHIuWxIXs8JBnlR8jBGLKRS6rk6STJRjQWkpFmdKInLQnK8OlulJvEZOpCqb31LfCPMDohU1L4wBWqsIW3rGB7PMjLXvryl8AEJgyRJsI0DVNpJsyaMnEmS6EEamvHNJoMwyaAtRXFhpdkJY12mERFFpGTfUSl9cApyUcyJJKqjJ0pUWPOwsHycqXE4Sm7+c7XkTOdk9PkOPGJ/75y6tN899Smjv7pvnhqUX/0/GRA13nGhJJyofJkJ0HVaNCAiU+cAOXn//yJ0YJC9KAXdWgqBRpHks5xooD8qEXnyUVvLlGlZWSpGF0aRZgmDKEtrafvKhpTiXaUojYFI05nKgAHeMUBtqwlLpXaTJnQEoUZCqZU
p0rVqlp1ThPMqlZduEybNbUkCAirWMdK1rB2KSFlTataqZlNhkJKa2tV6zRpuBVs8vSmabMm4dBpUrn1NZNtyUADBkvYBlSldg/VaBZX6tOtiAcv1uSrW3sKUpkWBTp4KRD5FKpY+P21U8vLQGsOYB/2cfaz6oxoQ43igdH+ZLOJRS32ZCu8rf/wZrQnMm1sJ4tX3g40KaPtCWxH6tuSFvekwJ0KecozHMmqlrGVbWxRziIdNg2Rpl6k7UaH6pIJKLe602kudicpVO3mZyvB9QDvdGq8u5bXsi657WJKe132es+9xpWuUTDbGcQS97mUha4oHcuZxgx3n+b17HEdCZcGTODBEJ5Abtd72gUnLsGIHOB42wpgSv40pZ3NLnzZ5tzomljArUSjUbeC1KVC1cWzTOqLv2oSNA3vqjjOsY53rMGtMrWrPv4xMwvDgCIb+chILvJMkszkJjNAxB1+r7LgGteyDuDKWM6ylrH8ZAEkYMtgBnMCoHziACOIm0UJs5oHILUvr1n/zWMmb37L7GGjvBnMbb5zmOPMYTr/1s56znKeA61lPuMXbSPWCKGxPOhFX9nQQZ3zgNPs6EY7GtIhljNyAb1oSy8a0xjWNIOT4mg2J8TNpQa1hVe56lwNr9SeJrSqoyxpM9eZ0p0+dakfTWYU91a/LoG1rnc9az/X+tertYmwvbzrARTb11I29qZxTehYB/rZto42tCf16koPO9W9zvax/0ztQFtbz9hG9rYRDWyVLBvVlw63usU9bWV7m9nElre26T1qTlf72/EW9YVbnU9S3xven9b3uBde72AfvNnp3ve8J05umyTg4hjPuMYxXoJTb/zjIJdxjGHsTJGXnORO/40qj1fO8pa7XE5BHjmNS/JUmQOZq32pclnPKgCdx1Xh7Jb2pajs87DOtZo1BLpfCT4sNBO1wrQO+rr7nVOoC33gUaf603ebdaxfHbQofdyhl95uEm947F6f9Nb/+/XUtr3gIkUw07cb6nYapMRTTzu/ARv3jIa6k333aKYBX3Wuv72fmba7f+XedVY33tVhh2ekpZ7iwAN18OG0PIj/nvnCsz3vjj+8+j4s9smTPdmkw/veQw96yJNe8pjnKD1XnJQWz7wiNT/57SmS+5QjCSgvD77wh5/jmOv+5rfc/UR6HxPmV6ToYuU59NOqdL1TfFVEL/rR9eoSu5re+v9xev/qz/591lfe84wX/WJX7/qCTCAC8I9/BOhLPau3frZzPy9B5NsT9bbOvp/TZ/dXWwSxAK0xYfVneANId/k3SARBH5zBfVUEgD6EduZ3a49FIMRDgVcUe4lHEPzFGfTHFRz4Rpwne/zHGa/FPSW4SOXndk6EAWwiAReAF8xDfh5Yd3Exg/2VgJ/Hfvj3ePkkg+VBg4xxgy34TTnYgEQxIK0xARtof0CIeDpIEN7VGv8nhddHeVuodQFwgFmogFPIgEJIgPu3GCuIhFoocWzIcF4YAIVVWCOYSEn4UktYhqOneaV3h+rngHoIeyf4gSp2VCbne8onEc5Hc4XYfCpHfI7/+IiQyEHGZ4jIJ2ReVYlCMX1M8yCaSFbVd4Ft2CvZ53Pbl3QCB4puuBbit1c4GIhV+If2xIcLqH/o53dMeIrtV4uC54q3qHgUJoZdCH6pCHavF4u8iIe0uHbpN4sKhox+qIuXd4x9mGEkuIbDCINjmIzjV4c1JYvZ+IzKaIvO+Im5GI67eIuEZ47RiI6dp46bx46yB43vOI64CHfyuIfSyIzUOIHWyIWheHpqt439CJDf6DG+eGDiOI31aIY5RXtGYXuYaHNDhjM3FokWeZEYuTOTyIgox5GHGBGJiHuLWGMjeTLQQo5BqJAts37/KIz+eI0piS4nuZBkqJIriTIGxWUBOrmTOkkB2BiM34OSVAgRI7AYG/CTLRmUNIk/G8AZHBCT36h+QsmAHDBaHTCUziiVS+lZHRBcIoCVfaiV2SQCwXUAJ1CTzCiWjEQBZXkAJoCWUTmLUykvFvBdbPIBINCMYSmXW7kUXdkT4EUdBaCXacmX2VSV4xGYCUECeEmYcZmNc6lgTdkaTwmXQHk/kckXRckZRwmWhQmZfQkaIcAZPumZj3mZpomaWPmXZxECSAmTipWZt0EBtFmbtIkCr/mSFREQACH5BAAKAAAALJgAGwDFALoBhf7+/kc8igAAAACAgNDN3Ds7O+no6f6LANvZ5wB6esnZ20pAic/l5UE3f2tMb//ZrY1bVf/AdiwYAMx1JadlQf+4Yv/QmP/r1KpdAP+VFVhEfbRrNxsWESgnJjAoXX1UYZtgSqqqqv+lOP+dJ/+uTf/Ih9t8Gv/w3rvS0v+wUf/guwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/ABcEGEiwoEGDCxAIECDwoMODCxYCmEixosWLExciaPiwY4CIAjZ69AgSo0mTCxkMWMmypcuXCTRyHAmRpkcCC212lHiyJ8WFBHQ+BCrUIU+fPRcqeMm0aUwBQYtKvZlzKsGjSFFCtXp1K9cAWLNeVNq0bMunUb9+xSlAbVixFYl+lcv1LdyMApaaNYtWrVq2bgXcxUjXauGpdu+S3Vu2r1+ugOcKHmzxsFTLRRPDXcyYqePHUyPXnUz5p9fRaQ2TLg2Ac2eXn0EXFa2atenUiE/Xtt0672vPmGWPpJ2bd2/cl3UX5+3698rYwm0ST248uE7rNjWLbe4cevThVUdX/1eeHHnm1aW5//b+veP08+PNC8VOU3tW9a/Zt3f4fj56yvSNFKBH9iGFX2f67WdQf9f9N9iAO5EHH3O+OXcWhO0xmJ2Diknon3wNjqeXhc9h+J2G9XG4mYcNgrihiCSylKCCBKEooIrbsbihiynCGOMAM9IYgI0E4nifjinyeKOPMQZJI5ERxheYkkUySaKTCkI5lJEGInkjlVFSOOKVJkanpVFc+lSmQWsWVKCaFTbZ5mMLKBCeagbYZoBSMyWnQJ/n5cnannklYOihiCaq6FN/SrbQo5BGKumklFZq6aWYZqrpppx26umnoIYq6qiklmrqqZsqQMCqrLbqqqsIEP+q6qu0vmqnAIKWRqgBCNTqK6u38vrrr8HquZCww9YabK/J0tqoVQktBKhOJVEoErQycVWtbdliG9K0NoGr05kHvZmUl0WCuaWUkql74p3LcYtuhO6Wm+a59bI5b4bwUkdhvm7uiya7qAlJVVuSEawawF0pnBvDj5Grr8PlBUbxeRD7JXHAF39o8b9TGuxevxPKm/GcA5l7EspgCWwmyf513OLHJocsMn8whwhyuzSzxjLLf+X84s4FJ0z0wjfjjLB4Rz/cc3ou65tx0EvvVjPPRl9ddNIFbdxw0xVn7XPUAU+9ltA9go3x0wCS3ZXZkKG9pNoeiw31yW5HLHeVdM//bHfbeMNtldcp37ty3i3jLfOOXC+4d5haI/33g4gDfXbV8Y4dONuUb954jY+v2zfjk3fo+ed1ho5mrpTJKm6Kz+JpbF6v3xh
7bqjmrvvuvPfu++/ABy987rM2C6usxtt67OzIJg/sscw6v2qxg0Iv/fO4Ri/97UVFy5C2Kmt17VQgjS/VtmOb33235FPNuek2Mx25058PRHjLiyf5/oqnf35/+ISpnADz96X62U919iJguvaXo/417n+G04oD/TW/sNUPggqkFwOPNMGkYXB0+isd/+LnPwRODIQFFGEDSfhAE3IMhQtUIQdZyLUPVnBtMuxSB29mQ83RsGQ+xNoF/134tRvWTX5B3FoJMUfBJEoOiXf7IQ+JWLgMbklxMNSgAXsYRSFazYn0GyITgdhFJWaujE9c4gZ1KEWdGdFvYlwjnHboRjBaUI05nGMbh/ZG0sUxj/jaY9r6GELU3SowrBuM6yTDvUDNrpH+gWSDhkfJSlrykpjMpCY3GSkFePKToAylKBkgK1Ga8pTLq172rjc967GSerpy5fWWxUoCKCABPxoAAxaCSzlFMIDqE0r5atcR9EEtmNRi3/l6GaNbMdNCT7EimrBIyBRy5ZkWcuaPopnFK8oxkF6cCjado01fStNe1LQjDq+Zy3KS6ZdjEWA60RhGq4zzN+6EJgDjSf9HPqrziPZsJy+3uc/KyPObh+snTe75mnx2p6BxOSggEyrIkTC0Mw5dD0RvU1G+VTOG7PxRRvOzUbwodG4f1WJImzlQc3ZzmgiVYEc7clHGjBRBJT3OTEWXUm9+paZ7uSljuNlTmE5UpuGUClDNItS9EPWfcPwiPe8oToEKYKlOyenPBvhSdKoFq01pKl+0KlEoAm6nDgErU8TaGLKe1KNQ9WNARdrSd55TajEN4FtpalW1wsataE1gV/H6074SFJ4G3StP41rIubL0qoe9a9nyys/AGmRRmF2UMzPLWUOlMpa0YyQxI5TIuyxyNJLcECdXy9rWuva1sP1d8aQXK6X/1BKWrZPl9nTrPNwq8lgMOKVwQcnb5KV2JN4brUOM2TZkhkuZ60NsRFVyWOfSRLkP4eJZk0rGto1Jn4hz31H1atkXyuu7Dw3v5Sib2PIWcWzo1ah64zbGmA12suONZ3xJOt/BURF/930beyO6X5z2NzT/BSx37XveXGJpP9rtnHurKCYHW46+A+bogusItQIP9cL+rS+Ht2vGJnbYwgeWSoTht2F/nviwgkOwiF1M4jSe0bsojrGKEyxdDZe4u5TzsFNBLOMMm3TCAG4wjA04JB5LVsD5NaiQx5ri2Tg5wIWbJ46XvMUrF9WrUSZwjpm84hG2eJDwHXOXZ4zmqa5T/6pb9qWOrcxmlDLWmnAOspr/aFYJn9nOL5azAVNX5yKVFi6nVc1xb3Rosciqs53lk6NiS+lKW/rSmNbkbJ1X27zc9rO5XeUri2s835qW1M2i5SuxixCFfM9b47EucqErTLLKmiS0ppZ4+8ziHzP4ziDFI6/N7OsR+7nYBivzCv8MVzcDtIWFhhywVZrnXtu4hl6etk+HvWxkC0nZM2S2tJ0dVWgbWafiXiy55WruMPv42kC2dj3bze1wexvQNZ43tqOt7nxT1cT+fjO9q01seP963Y0d+I2PbXBjy/vfHsw2wvG88IcLfN/n3qqWGa7viPN7YFhOXMbLKmyCd7vhNP/mOMSn+HHBfpmw9WZjut/V8hO+HL8x1+PMX1Zz82rbqDkH572zJPGAPxvgKr/4zQiNyEeyWl+LNrTTRTvpTFv96ljPutYrNdyuk9K2o8YV87TXW1Qny9SINvuwVD1Lv7pkl68mn62fTpBhgq/HeLl1MXMdLre35JDuPvLObf5zMK+URIAPOkUH7/OJB7uqdO35ex1P7YoXvONC8TtLEm9ye6O8zUYvt1KtOnLFgvzmUD58Noue9KPH+/Ir14nmV8J5y58c8w6HvdIzT/rAo3vo4w49u0cf+dIjWcHAf8jsB1B7pFvc9QcXfsKJ/1jjM37y0qc49RHP+ueL/vW3j73/TZbffPB7Hvcp9/7wi0L+7use+rkP/+5l33vFIzX5py88zB3LfclTGPVZZn34dxDt539Jpn8413kyN4CXVX8KqHMM2HjZ93jbt3oGiHyfh2+t9328V3y+p3ECmIEeAWmcVX7+0WhZkWi5EXWkNXWoRXddsXUyOIM0WIObtGnJ02k4WGqg9luiNktqRyw9eGo/uFtF2Fsw+BGuloTMRTl69xB2B2vWwoR8d1279oBCJ4LBt4Hrx3Ih2HEOEIZiGIYa4HIImHolZ3vn928OcABu+IZuOAFmSHnblobO936iNwFwuIcfQHh0CHR2aH4LaGMQsIeG6IcTWHkY94EkFwCG/2iIICCBXDh9HveFdwQCj2iIZfh/ZxiAa2aJ66QBmWiIG4B9k6h9lciIp7MBo2iIDsCJf2h4gRh9pwhShQiHGCABGPCIpqh+lOiFqkhCeviGEiAAEmCIENCLeNiFIgNug1hPbfiGj7KHcqiM8gd/yeZ+11hurOiG0wiHr2iNazh/2XiBePd7NiaK3rgQcFiKkuiLqAiM9kdeSYWJB/CNbtgnGIh+36aN4+h6EzABjxKQE5CMiFiLipiK81hZG/YozZaIdahwd7iNzOiQWwiPFKiQWLh4xWaR/YaQEbmIC9leG+YBAuABDwmSgMg1TCcZKIgUKugnSdgyL+kTMXkeLP+4JTa4kzzZkz4pKjXZE7uiSkF5EkNJlMyTlEg5KONBIU3plFApL09pHFJZlVY5NlOJlVFJlVCzlV3plW2TlV/JlWNZlmYZlmBJOWJJliuSlh2ylmrplm3JlmiplVd5lnF5l3VJl3Npl36Jl28plznCl3kJmH35l3tpmINJmIeZmI5ZmIp5JHDJmFohmJJpmV0ymY1JmYeDmXCimYupl5DJmZUpmoFpmps5mqpJmgblmecCmpeJmqHJmqWJmKuZmqdpm7lJm/Hkmp3pm7Wpm7jJm70BnAEEm5lpnL1JnMcpm7EpnLMZmcnJnK3pnNMJnc8pnZ9JndWJndepna+pnN3/yZ14IZ4RhZzbaZ3pSZ7FqZ7h6Z6/CZ/BqStKGWpFaRJHCVr3iRH5aZ/1GWo/GaACOqABuoPNooOfJnZLqZ//6YPNY4QPioTasoR3h57v6Z1w8oTLVYXIdYVq+J2PuZsQuZIiuZHzGaLDSZG/2Iz+CKK3GZ0jKosSKYjrCZ7x2Yki94mMaKE3Gov7N6O0KKIvmp0x+qMl+qE1iqIwqpIyeqQTSaRD6qIYmZDyaKLNKZ9XiqNEtmPmyKMnGqUQqIX80qXmaRpeSo8RCBrOmKRgeqE+moBAGn9QKqR0uowrWo4jd6bLCYA5ymdWuqcY6qZFCqdOSqOC2qY9OqhoGKfp/5eic+qozyh+/UimWAqoNnp/YkpzeVqm5RlyW0pnm1qp4/mmi5o0LTka+3kR/emgDUqEOYkmr2ovBDqrtFqrVmegyYKgYZeqFrGqRMirFeGraXeExgVcXTdcTEihUiiq58mpvaGhEJESDuahTyqlkMqmU7olU9YY1GqoiYqoX5qtsDqt67WjzrqPfkKuGGauzGqmnhonV9KtQXqth1qnKmo76hpioRqo3yqu9rKtTiGvcm
qtS2qv/9ggAOsZAtuoBUuv/Wqn+LpNCwt64Jqll4qmipavRcau/BquEFskCQsTE6uBBkuwj3qvIKuxXLqvF2upilo4IQsbI5uSJYutDv+LqSuosqDKsS07qi/bMjF7FjN7kTdrsUp6sBsStDIytB9Zs/VatAyJWjorFGv6tA17tUgLO1M7Li1qs1hrspGKk1srHV1rtSd7tmEbSWNLE1X7sF/rtVkbsU3yFacqOwvqn3frg7EKdSSYWbb6t4AbuJyEq8Oiq0CooAyat786dgkaocaVrNJSoed6jnN3d9CKEEybfz3brO0qeGmqN5TasUZbsVGbqTzHskcLtmHKj09Stm6LtnCbthzIoqG7ue7auejIukRXu6kbu76bhboLYa7rsW9rtrLLjHjKs71rvKsrqa3Lu6Trs9EbUaZ3uso7vberpVVGtcM7uk77usf/e6eTirrY26l8+qncC73fS7ywC7zOu7vku77e+7FUSrvxC7XS669GqpFIyrz+C77NS47je73y67JMur9V2r8A/L/sG77xaL8EjL+cq70Z0wCXY8FW0QBDOKzAShHC6mgbDMJ5o8EjLLgmfMIozDsdPBEfDJMhnIIv7MKIi7eJW8OtgwA4nMM6vMM8nCcawcNAHMRkpaf525a8EsRInMMSccRJjMSQG3fnM8STG2tU+C3akrlzuLwLvMU4+77CO0YQEMZiHMbh+I4SnL2267mmKxzTEY2PqI+UG8dETL1IRq3D+IgUcJAFXMTFy5FrLBvE8QGteABlfIBp3J64C4I6/0oQg3wA1QiLWtzAv+vHwTumBWGPg9yHkFy+iCy6pVvJmjoQ6tjIJiCOqsvA84uy9Zu8AUABhpiLlIKSUpzIcVy9bBweo0yMlmLIkZzK7UvJXmzJBOHKuFiMkyLLcjzF7zpn6VsQC9DIj8zLnDzLFExm8HKLo1jI1HzI6CrAz3sQJjCK7mjKkyzJqPzJwRzKBeHGhgjHc4zGvYzO3gy/BwEBFHDP+EwBmmzGfWzOXIyxoGy9IznB3FzLdVyuAw3P02zQ1we699vPvuzAGSkUGBxiFS0VJLzCANDCMqzRHG2TKEPCEEPCKVzSJn3SmuLRMSyUK83SM8yqisvBrYpoDP9Q0zZ90zid03iR0zzd03D3zubrya3JxE0MxEtc1Ei8fHA3e09V0ED9rFV8uc5cgEy9zfHMx3E70fQXeVWdzLT8ZJ6oeuRUV+D11FZ9wITKfoblUl89ubZcEFQdWcpMy29NEHHN1kJN0FdNxw0dAHdtV3Od1+8W0A3I1XLd1nSNZH9d1oFd0HU9EIudXmbt1dVcWIaN107t1oq91oCN2IKtxoQN15zN2J7t2Jt92Z392Z1s2n0d2fI12ZP92H492pLd2Hs92OlMgLT92ra90LLt2vwF25rd2rsd3L29x3z9uX7dt4uCArzE3IrS0kYp3fhJ3fxp3aoa0tqN0tzd3d3/rdIvLdMxLcLgXd4z7WhIHcQ+HBLpbdSUndlSSdTtjcNHPd86/MRVLNy0LNV1x6EkgcV6fMZBzdrKDeD8/Mv+nOAAndu3rL4Cvtq3DdoMDsjda8DIrdD6m9YJXK3lHNEdLs/YOMAJPeARDuFoXar8y+HnjNWnDMzz/MUPjeAe7r4vLswjbuIXTuInHtazOLAfzuI/ntx/rKYVDuQrrtc73qc9zrAybuEPrsh+qsAK7uQQDeKzy8p/auT/TOVZvcoinuVInuM4Tr8hmeLeOuVajuZCHtpE7uBVnuYz7uIh/s0R/OZhTuYkaubzaucY/uSNiNEXzBUZfd4wHN7iDVoj/73d3r3ojC645m3DMA3pizvehe7R9q3D633pOnzWYj7E8j3f9a3p+C25pV3i/r13VgwtBk7OR97nfC7hNa7OYO7qTZ6/JRABuJ7rEXABVo68Xy7lca7mCm0BmTgCvS6+dH7jnJ6aGTCKJUCSBY7Qs67jnf4fEdCKGQDtQ+7QdV7rBH0BjVwBaz7hbR7jLT6fJNDIB8DrsD7n9Kzs772XD6DuByACuB3rAj3tY/7q7RkCHcABkqKLj8juUL7kFFvtT1kAlXKMe5gC9+7uMN7t596c/s4BAA8pAr+HJ/DwV/7rKr7lcO6yzd6KETDu+N7g5h7k8FwC2K7tbM7t8K7fFf8xAqP4AC5P7jCv78u+pPP+iPZ+8ydP4W7u7bQupQ9gAUif9BbA7iYP8Tau8/Hu2we9rjF/3HjepHru461O7X4u2xfNpV9P0djdq2MfrGXvwWfPwtud6I3e9m5PoI9Ow3If6XM/6ZKOaJqOw5me964m80796e0d6pc+6suq2jt/JPw9EFHYPtIO7Fw+8Y8fwE4v644f8pZ/56pc5ht+5sHe+ZFP45Of75WP+UTP9Zmf55u/56W/76tf8Ix68H4e21OvrxKv8qbf5ZoPwVVf6kmOvlw79JB/+UUv+R2f7FDv9z/r+2QL/LbP+sHf9MX/7sdv9aeP9amv9SBP+s/P8b7/bvyjP/xb7/zE3/3S//23v/3nP/7IXv4fL+zpH/6uX6iqj/7iD/9/nvVMTv+HL9Fe7v3t7/kAAUCgQAECBh5EmBBhQYUNGxYkEEDiRIoVLQaAeFHjxQYbPUokULDjR40NChpwmFKggZMqU7IUgNJlQ5gyZyaEGZFkyYw7ORYEGlToUKJFjR5FmlTpUqZNnT6FGlXqVKpVrV7F6tTmzYM1uSL0+rVrS7EDw5YFcLasWrFsv25FyxAtQYNz6dq9i1eu3b1z++L1Wzcw4L9lC4s9/DUx18WAbzZ2CTmy4MF6Kce9bDizY8WbGXv+TNhzBdKlST9IKFml6pSsOSt0DRv07ePZMw9bOJBbd+4MqWtPFv268nDimBVm2J08wsLfq5u3fi6cueXg1BFGSJ79wsHYsqtL72ydb/SHly9kz06CO3nv4sHTdm88Pgn02VUM7O77+3vb7KfH1+ygB+rLbgT8/PtvPP5CUxBAxCirIDkMJCiqgxAAyC/B4hZ0KMMD95NPoPN2o9CoAjBEcD0QOSzPwfDiw063CYniwEIUV3yQReAa5HFD5Ag8YLkPXWRQxw5TzKvHEAfCjcDeVCQSPiOPjLI/HEUgEDUolQxwyha5zNHBEbMTQb8qd/TSzA3DBBOAB96E880T1FzzRbsCAgAh+QQACgAAACy0AMwAdQAJAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6On+iwDb2ecAenrJ2dtKQInP5eVBN3//2az/6tMrGQL/lBP/wHaOW1T/rUr/uWX/0JgbFxKqXQD/4LynZUGqqqr/yIdpS3D/nCX/pjkoKCj/s1d9VGHMdSX/8N4wKF1bRXvbfBqcYUq1bDe70tIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wADCBxIsCBBAQIAKFzIsKFDhQgJGJxIMUDEihgHInzI0WFGihs7ilx48WPFkiYnhhzJMWXBlSw5onR5UIBEmi8TxnyIUyDMnQ1n9hSK8ydQAD0t6jwa1GZSj
U6fKmXKMKlRpkRpZnV5dafVpVQhRn26NWXXmF/DMixrku3HsyzTqhV7k+xYuWHxqnWbkS9GuCP1hvV78u5QsEwFUyUM0nBRxEcVY3WslTJXyEAlH2Ws0rJZzF4Pz6UrVWldzWhFj+ZskHXOuah3uq55WnVe23s9t9X9FnTqx6MBzIZaG7jarwbmGkCoYIFU5s7tJh+MsLr169iza9/Ovbv37+DDi/8fT768+fPVqS4XMD3s+vbqEcJn+j74Zt+y8ccEzJK//bX69RfgSP6JVOB/wg1ooIIddfVABhBGmMFDB/5XIYUMyoTZAwd06GGHFDh0YXAjNkUiZh98qKIDJiK4X4YYnuiQAyqq6EGLLhIIo4g74rhQBDWqyAGAOQoo42qIcRCkihEQWeSCR871E5BLfhgCST1KmSWWUQJQQZU1PiDWk1Ai2SWNH2IAAQY13jgmmRp2SV1DFHwIgQAQ1Mjim3DyKOdiiHHoYXUqhshln36aqShDX3ZI6IdiHoqok1r+CQCVj3ZYgY+TJmjpfQ8peUCmTXI6aYmU5sZRCBRcIMAFFMT/umeqnaIqqaoduXpBmZ3yiWulub7Ka6+2+jpnRyAIAMKwtW5pLKAibVDABswiWp9a17onn3LbUofet+CGK+645JZLLgHopqvuuukq0K227HEbL7bvJobTAsV6uuivVMX24qdA2eqvkfsee5txwCbMr70ILwxtwZHhZrDDk8Em8cMKT8xwZQDn13FgF1cMscgHczwyqCf/ZnLGGFMcWsMatxxzZiGjzDLJ/dYcsLP63vzyyi7v/LFIA+s4tNHH6exxykuXfNnR1eK88dNM/1s10Upb7bPQScMs89dSR/zYfEdlG9+88JKdn7lst+3223DHbRO7dKvrLtpnq72T2fTV/yu2S/jynO/gWxYdtc1Bx5U1wVs3nbPXYUeO+NSfQd2g4IUvjnTjWj8O9MyTg/0z1ZwznjjIkIcuOddOV3715qdjnTrrscNO+W6Wx/l6S5ofTjvoKpNeu++O395b7jHuzlPvlyOfaOu4K6976R0Z3rz0yUN/PPbPD7/87MWL/vvfwgMf/uqKjy2v3jHxXbbfO8st//z01y933fgTcDf7LLkPlP97g9/oTBI455kKfaj7nPjOp7oBRo96xOuc8fqCOQPSinyug+D1NOgR5k3Pex+c4F8qyL0DOnB7HMweCBtivRAuUIIYfOAKu2e+BJbvhaaroewUiMAc4rB6HlShDv9tF0MUztCE46MZ+GDYQyIqkYcNTGIU0wdFKVqRgcHL4BEv2ETeLdGHXdyg524YRheWkYU4aYAAA4i3vrXxfW9cm/3mSMc62rE7eVufHukVx5gg4I+ADKQgA0k4C95KhBQpYAm5OEUbanGIEQTjzyZAyUpSsgOtIWEKaSi2DlQpOlAx5LPOWBWBjKBKGnjNIg/5Q96JAEwHwGQoVznKRmINlgcYQU1E2bMtIgUFuDyACGa5SSRikSUmCOYBTkBMX9byiiPRQI3UxCYVTcAnmnRmL5mSTBXdKU8qaiYkxThOj0jzQ5nq0DWxyctCHmcBKkqnLsXZSnLWs4MTQCdCPiT/S3bScpv3RGMATjCofXYoBaosJiOhKTtPOsqgBwClPxXKSlIuhCAT0IAGqqPRYSZUm+6EXnUK085s3m6kjSmpISdSAgGUgKT/DOkYTaLGPrZvjTe1af9wqqM7+vSnQJVfHvnIv5EAMKdF7QgDlsrUpjq1qTItpxCPMoCqWvWqWLVqAqIaUE4CJatgxepWTdpVY44krGgdwFhVusq0hnWtMSXrV92aVbhS9JnHPCtdxcpViy6UJXvlq1xt6cSdBPaqdgXpYGNyWK32lbCR7Ehjq5pYqXoVsg+ZrFofy9DCMnaylS3rX5loWNByNq+S/GxjQ+vXimLWIZpl7Wun2trY/56WtJ3liG0Xi1rPAta0vMVtb3ULXLZSdLfG9WUClsvc5jq3uTw1anRFctSd6nRBQc2udreLnqGmbY/fDcsgxzvI26Z2uN8DnHl9K1xHytCyZj2ve40I39HKd4dknO1lc5ve/PLXjPq9aBD3i14AZ/G9onXtfwVaxQLTNsAKaeGDF0zg+VIwufVFyoDjy977AvGLHQ5xZL3Y4PZ6eMT9fWSC8WriD5f4xPZsrYQrDGMDU9G/DqaxiFOM4NayuMYMxnGLUWxjC48QwwnuSU2T2pHqSve61J2uhrhL5SpbGTvezbIbmcwRJ3eEvGD+43qJPOEbE3DMMYawhkFMZh23Wf/AbE4zhU3IAQnY+c4SiFSEN2zfHZvRAkty057jXGQge5VKQRrSoF/s5zLDUAJgKtWaGf3mPjNLUGDa1KSFbGgOM6tOuBTTjD0tZ8ehCZcf2LSKfQxQkW0ABK66zpqC9IBRW7rUVivAdsBpJVX3WM0/RtqrLxDr6sxaRSTwNX1X3OrQIXpJElg0pxvtZnuKakmStrWCc3xAD1RpVtoOdqUremoVpRrOlMZ1pxnpAAu4+90W0LOyLxzXlfJ520NWt4unPW5x65vEKVkyeLUMRy7z6MoIT3h2Cf4/KXfZ4Q/xMkfCDGY0F5ra6E6JIu/a7DlLe9XA7ji3P/5rj+N73fP/PnK9Vxlukef74gAHucn9DXMeL5vVFg+yzEd+coyT/OYhz3kpCe1olNNc5yXn+dGr3UGiM73oPk85TDku9Izv/OVQH3fLq37rpqe75mDPus3pTfXg4vfqRnc5yrdu9qizHclPDDjEHSLxh0O5yXMHkML3zvc5MpyNBqd73hlSdwu1vd9qzxHXe454nPNWWtT6N4IWX8sKVOCblq/ArBg/+cPHGNCjgqikOW94uGMKogeIdtftQ3mAQvqhAlCRvPPleDlhOvSx/5Chlr7yjIEa9jWaEO/LfqZ4or5DgqZ90LPZqILmXvbDV2yXbn+Ab9boStHPcPa3+foOUZNJyd5+Me0h9uwgqV78y/8T6LFN6tL3niEpWtLmE++i1l+F+h469+rh3kAH+P///hd++7cQAQEAIfkEAAoAAAAstADlAHUA7wCF/v7+RzyKAAAAAICA0M3cOzs76ejp/owA29nnAHp6ydnbSkCJz+XlQTd//9msKxkE/+rS/5QTd4VC/7lk/61K/8B2/9CYEhcVT4NXDYB3ql0A/+C8qqqq/8iH/5wl/6Y5/7NXLIFo1IkU//DeAFVVKCgof4Y/kIY3t4gjv4kfu9LSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLAgQQECDCpcKBAhw4cHEyqEsKGixQ0KHULcyHGgxo4ZJYI0+JEghAMoU6KkQFLkyJcRYRYsKZOmwA8qczqY6VLmS5s/e8K06SBnTg88ffoEOpIpSJsRjObsEFPpUKFBrQKg2UFqzghVtT7F2pTs2IJRvaoE4dGs2JBanXYsOUGtUQgN3b5NalUux5JFVWp4oMEo0rx75+rd6JdxTwoqHwh4YHQn4sSO4y6GSPNkSoQCcrJsizlz380P
bdZFCTonXtKlU6Nm2Jgz1rStU07gG3thbdmaFXY9kPsAWN69WwY/vRAEhQsCLlCYbjlscuQ1Z/t2C/0C8OvKmYv/X9j9O3jr2ZcvLCGghPnzl5X+pu2WQwEO788bQGhA6X4B/fn0X4AyDbgUaAgmqOCCDDbo4IMQRijhhBRWaOGFGGaYIAEcdujhhx0qwJ9/IwpYYoEnvhTAiiy26CKLC8y3nXoHKvXijS/KCNd4NfqE448r6hiefNrtKBOQPwqJ3VU0qojkjUqixySPTj7pYpSwEdnkSFbmWOSQPWrpY5ctYhlfmGhWSWYAZm715ZJZjbkmm29KGaeYR85J55Z3psmlnm0GWmeWMOm5J5VT4lkooIOemR6iIBkqKJ9l2cgopWdB2pGkjbqJqWKWzjmpppnmKWqnoyqqJpmp+tnnql22//qoqn+eSiBMBpoIIIm7Zqfhr8AGK+ywxBZLLIjIfihirygyi2uKL+W66JoxomrtpxBxiq1ptEZ6Kamggpvtt91WKu5D2p5r27YMpVtuqa5uSm688M467ZqyJkrvRu7u+9e1cuIL8Lvhmiowu/npC6uV+b6qcK0Hq5uww96eijB9FxvUr70cP1xxxAT/m3FBG3tsbsjoztsxxSffy+rA/nLrcqwwr9zywk82fLPJHHF6a7TQjiTts86WZezRSCet9NJMC5Ds0wQs+7PQQYM0NNBFf9xltSODaXPPKvNccMzths1yvWKPa7HEGLOtsdk7nz02zkjqjLbcaoNM9sRxy/+7Nsrruk0y3Hf3XTjYf+/dNuALlYy3zF/zS/jcaUMO8ctdw2m45IlHLrLgBDm++eeMKyT64ZQ/XnbnlQde+tusq8436nljDrrmtKcc++iWy276nA1U3dHVVGdttfCONa388sw33zTRUx9v/PDIb0R8Rwhkr/323G9vd+q8r04m17fbGX7jk5Ou+IwB2/66kesPvnvurscfevq9n7/4zAzX3PrsnNOb5/JHP/TND3wF3B/dgPQ99Q1QfO6zH+4QGMAIPhCADrwczTJnvgTCToD/U2AIP2jBEcLvgr87YAZ9xz6DlZCFJzTh/VRIQAqu0G8ghKHXZDiQ09mwhjdEXA7/9ddCCQpET8GbHkeuRz0lWq96snGeFKdIxSpCCHq8il4TtbhEKDKEAWAMoxjHKMYGAvGMDBmAGtfIxjauMQFmrB8KDeLGOrYRjv7T4QQhwkYMSOCPgJQABu4YRwyicSFrDIFaMsBGPHKQUHMsyBpToJYTNLKQItSjQNSIAbscIARvxGQRI0mQAWRABJ5EQSjzSMQYgmQAEvAkSgY5AEeWD5I8FEgGZIkSETDSlu/boSYBYAKjDKYwORkkMI2Iy2HuMieSoYxKfFlLUbqylaUspkqKM0s1LpOUezykQkyZE26q0pvWFCY2B8LJbSJEJaBEJys9eE2OrBGVrHknSiy5/8pHOiqXAEjkZ/R5AEb285b/HGYf/wiaP9LyoMEMpxxHUkfQ2PGbAE3oOjfpxu5cNJ0SNWQa3UiCC5Dgo/P84URBkoCWuvSlMH2pFxnCxC46ESI1tY0Vd8rTnioPi7ri4hNv+pCcPqR7SO0eSDuoUgOOb6nN3CgAfBjEpmYShy+UqkbpOcMhcjWkVxViVr/K1KrWboMI9dQtqSrOUQKUrSvVqlpdiNaIlrWt8vOqVd06TLiK9K9hrWBdmbnVveZ1rIZVJ1mnir+4LrawZ+2fP+fKTL8G1qx47SpiMevYxPawsYDlq1QtK9rHUnaBSUopZ0N7WCslUag4nelCjFpU2f+2xKe4za1uNQTUZsG2tkSlqW0LktTiZg+qkM3sZ6mF3NO+FbSXVS5T7YOfFOp1tdGNawcqEM0KePc1R4RuaT1rJwvkMzQoOUx4adhZ7PIVNwQ9AFXWe13pRhVtFRgoelFyHMayl7WKnZtnzpuT3fi3vu21r6Mgo9+7HHizCnZunwLT4Jx84MGDBedd6weCcsY3JRAgbT3JS6gBoySaRmGLiAPs3hOuBiXHNMoIMCzZtOYrLWqpAH0hnOAeJ2w4XunvisE63hV6QC3VGfKGAQwnCltYsxnOqIT75gALWPnKFgAvjXOm2ghjScn3JXFyIRjlYY6ZyTt27XALQlvhBnf/tmtuy27nTOc6K6i3eMbab928Z4UYt7jNhaqeyGfXMLcYyjUu9Jmz61Qeo9nQ9gXzooscafGO+NBEbm2iCTvlxUq602IGdWS5PFlBW5rFXh7UpwOt6lNnGtU+tq6jGU1rSpN50xqGdKxJWGa5mvq/tb50pYFt60dPWtOktnGXb51sRYt62Ag29rN3jey6LVvaXwZenAfSZji/WSHdvq2dx01un+a5eH0G97YFEm6xsHqyZp42tjsV7+9RV9r1ZuUEJhDNfU+gOrpOzLsNYl7iELS/Ad/LwNFC4JToeMkKv7YI89twEEP8LQsXiIm5eYDRHLvUNWNwxVOCkY8rm0ZOSDb4flOi3jblm0YvHrnF5R1xkJsEmpNJccIxLvFRUhzGhPnKjE3u7Bt78uFE53S+Ch7kV58cUjjxCsBpznObG8TEKrmw0wUSEAAh+QQACgAAACy0AOUAdADwAIX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2ecAenr/jADJ2dtKQIn+AADP5eVBN3//2q7/wXf/lRT/uWX/6tIUFxR3QkL/0JgsGABPV1coJyaqXQCqqqr/pTj/s1f/rkwsaGjUFBT/nScsAAD/yIcAVVV/Pz+QNzeqAAC3IyP/8N6/Hx+70tL/4LsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wABCBxIsCBBAQIMKlwoECHDhwcTQoTocKJFixUvKsyosSDHjgM/ggQpkqTEkSFPogRQcuXElhphXpSJUaXLmTZH0nyZ0+RNnzd3UuzZUejPlD+NMlS6kGlSojGh4nx6NCrVoFJrVp2K9apLp13Dfs3Kc6tWsSvBeiRrVm1Er2nZbnWLFC1KujrlLtXblO9Gv3bzwr0LeOxgwYHNGsTLsvDbxIofG4ZsNfJCAwgNuMQsQPNKzp5RgiaMsLTp06hTq17NurXr17Bjy55Nu7bt2wRy697NW7eCzJuBfxYumnjHAMiTK1+efAHj547rjmROnTn0w0CnV98e4DrkCQ/Ci/9/8Hcl9+3eJxeckKC9+/YcFjs+Xz19XIUc3usnLxkkfer2EWbQA/rpF8Fa5v23XICIERRBgfpB0N9xCirHYHYCQQChgRNqVKGF0TUUYmMFPbjhex1I59+HyF1YlEoS6KfBBRpAOIGICbLoYmUCEfjeBQJcUGAIKlKo44gBevBeaQXyRyJKLLaI5JTrLYmQfh50eFGU3VEZWIztMfnejUV6GOWOXJUY5pXuSYAglGd6qR5BFqwpgHsHvqndkdi9uJAEHVAgAAUdFOpkmVvG2SePCglKwUNucYnmWQs5Cul8ilKWpkIZCJDBpTl+OGlZDG1QwAagwnlkaCONNlxnwcH/mtdttNZq66245qr
rrqf15itvv8lanLCtGgeSqyt+6Jyc92n6kKTMCugsQ9Auuqm0e4oabYPNZlvhqENZ+2ymc2LbbbLaiktquWbyOW247ypUbbx7qbvQvOxiaK6R6dLbl73yknvuvtzy++22+hbcbr/5+ukvQfgOrHDCibrbMKMEV8ywxBQ77K2C4KZ6MUQRZ+wxxxaVPPHJJk+kcscYr+xynKwea2xHyBZLbFS89uzzz0AHHfSvRBMQbM0436xRzjbvrLGCywJc78MDvcyyzNcufLDU/1ItkNUxw5z10yAjfLXY41qMcthnpyxwy2O3PbPacFO6NslvY2133dTm/432unenvTHf8I4s+NZeayk33nTrDTjhATf+d+GB9y354ns7fq/fmD+ueeSDfz614ZaHPrnIlW9+Odusxz236Z1TDrlBUTqg9EVMJ+300rcPJfTvwAcvvPA6I8377rj3PlHuFh3g/PPQRw99yKOnDjrU1HdNuuqwt5756bRz7r3n4BcEtuvof+/26umTH/v1ZXNdXuLnqz++7FrHnzii74fPvv3twx/Z/pO9+W0PfgQ02/1QZzD9HbB6szOf+ALIQNFJ8H/uWyAE85dA+cnHg1+bIABHmEHGdY+CGywfxERYQhIKcH0ndGEFVVg1Fr5Qg9pDl4JshzyLMC95PVye8v/2MrwiGvGISIxN8WJlPCA20YdDXEgDpkjFKlqxigX8YOIGwMUuevGLXURAFvVkPYOA8YxfFKMCUZjDjngRAxWIoxwrgIE0jlFxOCRIFz/AgD760Y8I8KIaQYhHNuqRiyr4oyJNIMg78i+PA+EiBhRJyQ+E0ZE42iICQEBJRaLgkmuUYQotMoAKdJKSdRzAIPeXyQcWBAGnpCQIArlKVxqwjAUpASVPIIIToFKVmHySLQcCS0WKQAAiUOQsgRnKFjpzIQPQ5R9Lo8hU1hKXt4ygHovpR2r68ZNcvKY2tThMgUhymgj5oyXDGcw7dpGTffQmAxgJSkI+0pCR5CIf45n/zj4Gsp6sFCY28ynJOJYmjqkEaDkLKcqCnLE0aBSnBbM5UYJ60VERbWczoQlGElCABBnd6Az790oEmPSkKE3pSaPIkB9CMYgQcemlkkjTmtqUeE17ohBh+hCZxpSlBpGeUKWnUUJGKWoBvWP9njlSSIYQgzfEJxkb2MGkitR/MWTqKJ0KgKVGtaEUHSB9iko/GzZVqgwtHeIWek+w1hCqZ3XrVDk41qu2saJPzepXtXpXsZ6HrAv1alz5GlYYrnWgc6VhXg87zsSSdIVw3Spa26pWByI2rXvlHmPxitnBatayje2sZA/3Hx7q9Kc8bSlQFeLTvtz0tbCNra2W+KrT//Z0tQZprUKGylvnAfayR/1taAU7WsKSU4eg5SxlXzgCCDj3uRAgU1fNWtzMzrBOECLSYpOrWNH21UQQGsF2q8rWVkpMQyfKE3H7+tjjAoA9J2qPm9ZbWK4WSUnxbc8E6Ote5ZrXR/lNAAf461j7ZnIDGRDUaWgEIQJ7t75cKcBqhKSfDjh4uZ5NFYIpoODSMFg/KbiweUNbJvBuSEIiFiiJRwyAEcRXvdRlr4FVLJAQnIg/KRZujzYUn/HW1Z4s7tgDLEDkIltAujm2K4RJS97LYri6CPyxVY3KItMy8cq1JY1st8zlLqeGtsOyrWpTexncDqS3vNVxd4Or5P6uOf/GSzZugU242e4+WcZ05m575yzX6UYWz32+M2T1mmEoG/qChD50nK2L1Trv+cFuNqyeZ6xUOEea0pj686ILDejKNnnFNH6zpi892SBL+tP+DfWjkwxkVb/O0Zim8qj5LGdI+/ivbaY1oxEN61K7Gq2snrKqKmTlLBs7zFr2srKX/VowOzunWLYMqQNt6hk/WtDTrjWo1SztX1Nb1RIIt7jDfahqW0bN2C1QnmztZHN/z8QR0vVcct0h9J5Iut5WjHDhG98sYbu8+ZYdfvPLgn+3O+CoA3B+tYvwqvwWTO6ZkWoygCrGXNvdgON3e4C0mgI0/CjCtXcCJI4aClD8460tZhC8CyQhg297W+mG0Lox3hZ6Iyo/Gyq3xWOdGI2/p8fsfnmfxjOeFGR7IQEBACH5BAAKAAAALLUA/gB0ANcAhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5wB6ev+MAMnZ20pAif4AAM/l5UE3f//ZrP+5ZHhCQv/q0//Adv+VFCwYAP/QmBUWElBWVv+jM//JiapdACgnJ6qqqv+xVCxoaP+sRywAANIVFf+cJP/gvABVVX8/P5A3N6oAALcjI//w3r8fH6ufkrvS0gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAMIHEiw4EABAgAoXMiwoUOFCB9KlBhxosWFFS9KNMixYEaNFBOCnPhxZMOSJgF0XBkAZUoALlPGNDlTI8uONUHm1LjzYs+NNw3+JCnyJcOhIY0uDCq0qFKYTpUifTiVIVOPUY1WPZn15VaFVwl+xdhVZlmaZ22GFTgWYlqdb3nGnbiW7VyLbaE+Jbu3bsu7RPe6Faz3qd+8iAEnNVw3MWHHjNdC3jvZ6GHFVDE7rPzy8mPNXD/3bQz6aGm+gsd6Tn16MOvRklsXpixbNWkDew0gxP1UtwDeSn0D94qwuPHjyJMrX868ufPn0KNLn069uvXrAgho3869u3YFu3OH/+89Pnj5zmsXcNZa+/Rq2qJfRw67nrjEB/jz45ew2T3p+PA5JEECBBZIYAb9wUZfewAylIGBED4Q2nxX1WeWQxZACCEFEyr13lPrUaAhhBOYpmCFDMq30AQjasifax7+pyKITg3YIoQeoBZjbA1K5RQEEG5QwQYjvmgbjzP6yBAJEFYgQAUaIgijZTIGaKVCIBhYnIYk6EglklcqyZCNBG5pYI5eogcmjT0CwGKZCEGogokUMmUhWg+JmICZBJZI544LtmkfhnAKUCCHHX4ZaJLsSQQBBBcIcMGjEEiYqJqLhtnoRJFegJd/a4qpKUMcCMDBpyfameKoC3VQQAeo1v8Z1J0j0XrRZcO9JJx4v/GaK1rYBSvssMQWa+yxxHqnLHfg9Uqes+ZBa9SugDKl3qpsMmrSh6JmyypI3G76ba2gZurtud1iiqKgF2o7UriDjgtXqrNim664iq7rLp77qhUqvvfGm6+q7PIrr0XwtnuwT+Xqu3CsDz+UsMHoApzSxOQWnLGsN9kql8a3VlmxwAG/K3LJFKPsr7kqz9tvyP+SbLHM2548s8IjmxyWA+fp2nNK1Eb7K7nIFm300UgnnbTP0jI99EhBT/vzSAdUbfXVWFvtMcMgI5ze1hDnDK7NNOPcstcxm31zyheTrXbZbNecdtxv1z323BtHvFi1BL///LHfdLlN9+B5D1xv11wDDhTeLuudGb0d27t24er27XiCikssOOWTN2545IiHfXbgjP99+aVtl5746X9+zhLYgWXuEMaejy566ix3brrYKzvMe+26k34Vz00DPTXUx4MUtVlKN+/889BD77Sv1D/7tEYNZK
3ZdP/vni+903duuzHz3x4Kfvfvzwmx9+9s0/Xdj7GfKfvPz7o1//8Oc8Av5Of+rz3/IMeDwABtB+0oNg/dDnQARiRoHVE+D/HDg/CQ5QgwsE4fYs2EEPbpCCE+QgCX+CwfGh8IMcZKEIXXi/GS4rhhe0YQF1CLUWGi91iPPc5/9OFxYgBrF0QyzY5pbIxCY68YlQjCKkbjUqUOWJiqUSIqq0eEQiYsWIj6sVrLC4qVztSgCKK0qyfLhDEx6pWUALyd6u1T4Z8rCBZotj0KY2NAY+0I2B06Mc+bg0PybQkL0RJM4IOTc29hCR7BOcUOY4LkjaEZBhUiRFKLkuR+IRk8uSpL4YyTdP/vGFbdPkRDhJHVMe8o51U+UeydZHWL4SlO+SZURYeS9LEsiVidQlt0hJR2CWEJV4E+YwaVlIW+bQmZFUJi8T5ssVXnJiomzJNM9jzGvWUGXC3ObIuvnLampNb8SsJDS9GUFsSjOdnTQnOU2GTmY2Up7mzObL7Fn/Snyuk0D6LIk4a+ZPXEItoCEhHDyp40WkgFF0XAwjErcoqDR+SYoYzahGN8rRjnKHjJ8KFUg5NaskmpSiDfVJqsYIUjPyii1rLCgyqQRHWQ7UZzL9pjvDudBe/tOa5cyjIm8K1KLWMajg1CVRj2rUeZ7TXD2l5k+ZSlWEDpKfxcxpOzdm1UViVZ0G/eRMM8nTr8Zzqk7t6iajyk2tpvBoal0lW8fpVhjuVKlzJSha8/lOs7ayrifkal8BR8OtGjZsg2VZWE+pU8GWlbA3BGwyH6vYsUZ2qnGdJWTbaFnOOhavfvXpYm/Z2YMmll+jfWZqo0lZ1JZWrI2FqzAVGtp2/6W0Jw9FXUQhOtEjMq5IHg2ucIdL3OIuaqSasqKgkKupkqLUdNAdIkvJ6FI0wrSpkqWpRfW4VKem9bQEey1jD5s28DpMvKSNLWJbG1712tW9qWTveeEbwtWyRr4pQ69q9cta0G72kXsVqiC7m93Crte/laVvBu0LUPPmV8EGfq9s8ds1BmNXwNzNK04DbOGnTlLDF+4w3G5GYA7z974Idi2EL9vhzO4SxFT1btVUWWIR85XCZbMxZh1c4RMj1cf0hKoAHMAWB+xWt72V6G1Vktsiqse4UI6ylKf8xJMqObrPtfKVV3OALnv5y2D+sp2CFeYymznEQI6xqszM5gNUd/+7LYmpiVecrZrSGMYy3jGOa6njFvM4x2n+7p6b2ec0uxgiNQ70jVPcXvIG1tGTZfR8Ib1gQ/+Zz4rWs6QfTOkIP/rANsVzgVl811DXtq1z7nQoL03oTPt50PdM9VtBfedT01XWEqa1JhNNZwC/etM97jVsVZ1LWPcT158ur7Gziuz6lrrW/x32rHOtbNAS2StGTjJvl5ySJn/xyE6mGJXHTe5ym9tRWkYytzvibYeC+9vaVjdvGkDvetv73vZmJ7WTzW9nh2/NbS4zmXzUAGuiWdjjVZCdyTJwHuFk1J4ltmm90nCHGzzGEJ94XioeoYxLe9+RrgrHO97sSiNcfSL/G/lpPJ5wkAeS4ir/UR0PLvGPx3fjKn94yT3t7wnjfOQ6L/TJ9/tZkcc86K5OetGXEnOZszy9NY8lzHN+8Tz/2uhUnznGd77qqQO96k8nus+x/nWtW93SXuc40od+TLbru9pkVzvYuR5xXTPo6HMXetRRzpamr33vYp92v1/+c7mbPextH/tS5MR4xtvg3SqFfOTjHe51c6TdkxfZuTfP+c5TOd2Vx3IXRb/lywTczGNm1unZTHPB91x2AF+9l9983a3rXeFwjpuo6e7rZ+9697d3OVmBDWi3/9j4Qf6wrfUa/MEP39TRbrnzSb303y9/w81/PdyhnWDAJ971hId+//fBz3OTK9760Yc6+al//qECX+neR7H4VRz/49c/+aO8fuuFz367D/j9yGd7vud++ieAAThiLsNr92eA7fd/BXh2AXhoy5R+gcd/dbd96Dd+Fth7EchqsZZ95ud/GTZkRSZ5uGWCTIaC3aaC7PZknveCMBiDGAV68GZ5G4F5J0h5NUh6byF7YJZ6Plhm+zd9F5g7seeDtOcrDLh+RfhGuUdiALiAglYRISAAIZCAUciEHIgRH4ABXviFGDAD0hdyEYECF3AcFnABasgB26KAWjhsMgAZGzCG4QcRDxATyCETN9CGWbiBw7YBpPEB6rd9CyATeSgTD+AsbuiHjPUBtv8xhxV4cxBxAYboEE/xhPtEgd/3XoBoGzEQiSHHATNxiDJxAa2xiETYezFgGzIBApsoiR4wipb4FGzYMX2Yip8EAqwoEynwipF2h7IoAGfhARVzi9qXPC6wAtvRHe/xFK5ofwcWi8HIFyVgiw+ITwVwKN7xFDjgi4QnijNRAQKwjTNhitaoiW+HjMq4jO5xFs8IjXAnjTGhActxFtV4jhqIi4zViaSRA6AYfiWwiyhwisYYgu/liKQBid74fAGAAqyoiAVZfoWVAoE4iJIIEbaRiAR5jak2A5DRixZJhgEAjMPIhxx5ezKQASq5khnwjgvZdRPxADI5kzJZixuJjvDdqI8hWYcZSH9vSIcSCZPz12g/uZNBWWzWVoI66G5LOXk2qBE4mILiJoNUWZVWyTlZNno0uINbyZRPiRFB6GVAGJZfNoTHeJbQc4Syl4TEYpYGqZOYsXAEiJNueZQaN5STVpT/aJdZI4GoiJZ8CZRCyX0+yYhG2X8YOJf5CJiICZf9hZecppcv2YQ8qZiF6ZjpyJhICZnBJoWLxpnF55maBpqYdoCfSZhEaZh72ZgiaZmpiZk5qZl3iZp5qZqTuYUiGEd/+ZayaXOt6YB0uYS2KX+0GZnDGZu8WZk6ERAAIfkEAAoAAAAsmACzAMUAJgGF/v7+RzyKAAAAAICA0M3cOzs76ejp29nnyNnbAHp6/gAA/osASkCJ0eXlQjZ+jltUjSVVyRoaLGhoq2c+/8B1/9CXKxgAshg5DhUSLAAA/+jNZTJzaUtxjjc3yXQnMylaTldXqwAAql0A/5QTeStkb0dHomRFwxMs/6M1/8qK/9uyAFVVKCgoph1CuCIiqqqq/50l/7hjXEV7u9LS1Q0f0w4h3H0Z/7BR/+G9AAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AEwwYSDCBhIMIJQgkOBCBAAEBIkqcSLFigIcAMmrcyLFjxocELIq0CHKkSYkYPar0WPKkyZYuRS4sqKCmzZoRZjZ8GHNkypVAPwoI2ZPk0KJGgyqFiRTl0aYTdQ7ocLNqCYYDHEKE6lRpUKZQwTb96ZXlU64Xz3LVKaGqW51a0UYkW5ajWKR3i9KtqzFvT78xdbpwW7UDw7hy9/IFANhl45OK+T5+qRbqTBCE3UogiBht5LqTfVYeK2Bxx9AiUVtcmCBC5qouOPNMXNr0RtUVcVP8XFb3RN8SF5Z47RbEzq2ea9sWSjR5c668vQKfOxqpwBUYHmp/mCHE2+Ny0y7/71tdb/m/ypdPT/vc8oDs27dnKCwbOfT0ttfrx296f/um18EXX3dVzdTZfeMxF95+CTJ2Xkzr6TQ
cccbVtyB/i/l3YYMayqVTa6/ldNhsyXH4oGMnQoahZClS9p91WGGW2WYj2hfWiqC1KNqL5pnII3o/BobVVBEUaWQEhmF14I0+Xhgkik3S9uRJUg1ppYW0Renchgl2iFaVV1q5JGla3jclZWXeeOZIYIapJIkIdqljanMmJeeadOK5mpt8jokXjr3VmZuguwEqHaG/IRpcAow26uijkPppngHjGfAQAgzQhiltlC5nqQCbJhfqffGVauqpqKaq6qqsturqq7DG/yrrrLTWauutuOaq660IEODrr8AGG+wBn/Yq7LHCatWpbZ8acACy0P6q7LPRQqtspQ85W6212VK7bbKZcsXAAQ+FCxUD0S0lwAHmNoXuuu0i9a6J7KL1br3ihkcAnEze6SSX43kZHlf72kimv1ICrJ6i1Ok5MEUFK5wfw+xJ3B/F6+nL78EBY+xxmqQ5/LBEEWeJ8JYmdyxyoiuPXHKJJ5tpcYYet/zwy3Gq/G/KC9tc8chQ4dyvzgnzPLHPGcslNMc97wwz0SgDjdTSf4L8J9KGqou1zxobXHXMas7M4tZSN0V1j2CHLHaOZJdd1NnoWW0e1nID6fbbG38NtcxGX//c9t0uwQ1h1l/VvHagfwNukuBQpn314YcmrrhIjKtY9+B0Oz735CdVjqbmdvdNs+ScQ5w32nuHLfrYTpdekeeiXY5i5qmr7XpFDEiK3rKmFRvvn6PeyPtivmv6u3m7Jq/88sw37/zz0EcvffLD89Wsp9liK0D1dV2P/fbac1+W98yaKKf556ffMfoNru/++wuzH7/67U9Mv/33Xyw//vXz7///+ssfzfbXvxwJkEUEHOABDVjAAM4PfgBUIAQd2EAGPvCCEUTgAgNVQQlm0IIYpOAHOdhBEIrwhB4c4aESWEKVpAsoL4Qh4WTIQhOisIVm2eAKdaiuGpIQh4XjYRD/hUhDIq4khi1EYg6N6MIZHtGJTfQhEIXCxCVOMIU3tOEU7QLF03TRi1LsYRXBuMUnjpGLYRziFTVYRiuqUIxr1CIbQ9hGKsbxh3Sc4xvVWMfbfNGPaSziHXc4yCkqkYyF5OMeBZnHNh4SjWcEZCTJE8iViM8r5CNe9r53SaVkUpPg42T4tDe9UprylKhMpSpXycr4GOtbwyoWLJO1SWbVsne3BGUng9Isb83SV9P6JbCCJy9yCeB4MZnXJO2YSBni61wPeaa7ugY5ODYSj0dr3e1Idrq4LdNBlWwi6bYJOzqF042LfOI4b1dOOzXTjN8U2DYj0s5BnROR14ycNudZ/89C3ROS7xTnPsnZzcH9U5IBNcs6XdfPRB2UkvE03Dwn0tCuRPShCiraROlZ0MYlFJ/pFKhGN1pR6mAUnBFdaOlKKp6LpnSg7Oyo5Vz6US6qlHMsfSRA82nNbI50ojn9I0J5qki/wZShMv1cTYcaUoUedaVJjR1NiSrDm04uqCfVKVON+lN+RtWcU22qF62qOKyGNYuIe+rkcvdVo+wSKJ+0Xi7lGkpbggqZgyPmn1rJ17769a+ADezzXilMYl1KmNKaa/cUOz7GYtKxnuwWYoGZrQYg4LKYzaxmN4vXk4yrXPbSKkSXOtqFSVNeD2kAn9yUAGquTo6wxSZXRbXaMP+1VmlttedZsTi6CyGgtle6LVrMSlpmUlWdvgWulYRLsNz6c7d6nO19fqtcrDA3aM51KHRjq09NVde6rn3acaP4Uu9+tyDhzdl40YnW7tL2vAO5rtmya9HiotS++6EufOU7NfqadLuy7a1595veoa0XpO3VWnLhOwD+4s1rqDvwTsVq0wUTGLcQ9iZ+hVpan773wsP1b0s3nFWP6fe8Du4JcSW8Vd6ybsAoLjDTKNzi6Ar4wzHGcDWL6mLuKhjG301xTFZM4w4nuHBYO3GQZaw3Fhu5x2kFcnWFHDgRi9a4RcaydG+k5CkzOcJZvq+TM4rjJdtLd4N7qyUhy0s2w9X/zWu+q6YgRec6J0CweM6znvfMZ74S9peGBdVkCXAtURrarmpWSS8HHczJ6rUnnz1maDmsZSgT0rSdNcm9Mj0STi/OypQWc5iv7CCyAo7IR2bkqCW60QCg2tI9TTVyu0rQDBsUwJf2MN9a/Wob+9rHSFbrVUFd4hKb+m69BraqZS3SqJGU2LiO9Y13/Wxbe3TMoma2U2kdU2vPlMTldTZQoQ3uDR/bbckO8K/VzTZhl5Xc2Ca1vFld7R0vG9Y8frG4veptpcY71PM+d9nSnWttI3jaquM1vFdtbHefeuEGn3DE/ShwqRFc2usuOMJthxYHYNjjXHEAnBU9co/EdbF1/8Ulw0S+8j67/OUwj7nMWZVok5ecIydvbMp1OcpDq7zmHXHWAYZO9KIb3eiUiubRl870eRf7fEJnutSHjpGoT53pnrZIpLOOO6dHu3CnLcqm7fVlDf+74dxGar+lWu5/VxxoF8+3suFZu8e12tUQx/e9M/7jfdfa3nQ/O+gwd3e8rx2sbV/1212Wd77Lnd1R9nu3AU/exG+Z4/V+LeQ17viqOhzZjZ975d3+eXSHfvMYF/22Ja92yrNX74HXdcIzL16Gh5va4z68OwVv7tIP/PScV/3B9Y17frt++MKX+MbtrnDd69bysG928f+u+eCj/vHunf7kq5/66++93WmPCf/ImyuA8TeF5UDH+c3Vv3O6pn8jn7IZy+U/8/rb//745+v7NZLzyLYf5fuXEf3nfwEIAAPYZv83Pg2wgAzYgA74gJWWfDUmgU+GQFZ3dUdXdRgodW1SWw1wZV4HfSwSdpAWTVxHEQzQgaulFV/3fd43a6wnJAzGgiJIgREIfjHoEirYJyAIcD6IdjlIJQyWFT34dL0Xfjo4hDTIe0woe5jnHjNYhC0Ye5fHfGuhhFJYgy8ofbMHhfC1hLanhdnXhQCChZQWgk1YhZvjIWY4haOneL5HETvoJmA4cRVog9m2fGv4JW0oht1nfWP4hGUYhWf4g264etrXE3MYJnUYfa//13lUqIehw4eEeIjIt4WISIYwUol+iH1/2HeJKINfmIVp6IhjFYdR0YelCIlvqIaTeIWcuIp4GHCouCh2dosJMAPrx3+7yIsJ+Fi/SIDaQ38tl3/GeIzImIy2UoAHGGfM2IsCCI0GKI3N6IyVsoFMl3Trgo1Lh4ZhGD8XyI3GZIDiaHQnOBFbN2lG+G8kmEwmSHY6xn2eOI8uGIhWmHvHp3ys+Ig4GIo4BXyfSI+RSHyauH21Z4c3iImniITD5nzP1Yn1CIoF2XryGJEC2YqSSHh3F3cW2ZED2Y8TCVUOqV0Q+ZGR548NmY8TqJD6CJKCSH0HaYotyZIUV4s3A5AX/8mPNEkei3eTI1lfsriTZIaS7/aT/1WSGEmQL2mQ6vWNcMiQRamSdyiUeaiU92h8FWmSgBiQngeVD2eUIxaUWxlsXgl6YOmNCFmVLnmVMNmUaYmWGTk74WF+81V+HUeNeBmMCPiMxUiMyviXgBmYf8mXAViNJKeXb4aY1uhzPOcp5Vh02viYRQeX+3iJKxSO3KiBktmOLpGO4kKZs+iDnOlZ75
gv8RiTlTmTY9mVQRiVWZmUKUABsjmbFKAClsmVMDgRJjABvNmbE8ABg8I1p+mWvKUCC3CcyHmcI3CbObmQE/EAyZmcNhCchceRkQgD0ZmcFKCauMmFDJCdyfkAhf8inCF2loa4ESkAnsmpASu5mrkZABOgnsjZLklTnlKpZSMgn8d5A+3ZnYjIAfp5nBPAMtWJk6oWAwF6nDgwle4pfR6QoAsAnA1ToOYpPxoAoQuAAgzqn84JnRDqAU5BnuR3nyj1AiwgINthASKQneypllQJEw+anCJgAaZiAR8woRtpoPBUAKliAdGpoQnZoKsHoMlJo6iCo81HoilhohiAog+hotHZoi4qpM4ZADF6nDN6Kjf6M0n6muyVn/oZA/3ZnBUmETKQoCZAoDlaoflTAfq5nGPqkVxoAgE6nhSqpBiCAvKZAtxJpjVJEfopnmrapaipRRcKnjDAnHKaiQH/4KHZCaJ2uqZ4qhIqUAGWeqkVIKUb6qc8aREP8Kmg+qkSOqi0R5ypGadaKZFLSZGF+qLn2TRlaXpsipQ6aY97GHIfd5eKeZiE2XN2RYx+KZjCOqzE2me9imi++nPJ2pjI6pibGZmbOY6WiEaYiY2aKZnX+pjnKBGeCU2vumqjqWml6a3h2mlld2u0WqUU8QECsKV5YpMDY51JGZov0gIXoB0XkK8kQJ2x+nuzKpaqCgE1oR03wa+t+ZWTensRYRMEaxMQEKkHa5YJe4QScQEM+xBVYX7yhJWtSqV9ypoBsAE30bA2cQGkGpL/+K9OKV0nMLIYWxUbEKLweq7XtrI3/0YCVUGyNkEDMtuvFqej8+qqzdGyLisAhBGzXEqUCOulipqqZBmyVZEBAjAfVXECPRuxsjqxpFexNxECBJKxV6u0Esu0H+u0IBsAxFETLXCyqyqSWvuUEiGwxAGxYpu1ZIuqQRuIxPGwbMuWTGlgNquUOEsYNWCwdeuvb/uW1QEBjNu4jIu0fXur+Hi3m7qo6tq2cimplBukHFqmWGu4k9uxnYu3jOq3rwgVdNlfdhlyeXmsvyoX8we708FyxVq7tnu7q+S6zKqsjOl+y+q7noJRoOmq69hHnGu5ZZu3o1u5ZptEhVi8intSwvut0TutBUSKgRua0rtMw+uxpNu0ypELRNhbvemavB00vjL5veYLvrVqSM9rvcwbvsh7ve9bvup7v/F7vvULsJx6vPPbP+h7qvnLvgS8vg0UwNoLv/7rvAo8pcu7wPLLwPY7wAaMvxCsvw3cvQ/swOK7v9lLvN80vdCbvhRswe2DwCA8wRdcQSjsvSX8witcwQDsweTLv/8rgL+rc7q7uzwMvM3KLAEBACH5BAAKAAAALH8AsADeABABhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ58jZ2wB6ev4AAP6LAEpAidHl5UM2fpEjUpBcU8kaGi9nZ6tnPSsYAq0aPP/Adf/QlywAAGYyc/+VFv/bsf/ozggUEjIpWow4OKsAAKpdAMh0KFxFelNVVXgsZW5HR2dKcsMTLP+jNP/KjABVVXtSY6QdQ6UsLKdmQqqqqtV5Hv+8bSgoKJ1hSrvS0tcNHtIOIuJ/Ff+dJf+rRv+0Wv/gvAAAAAAAAAAAAAj/ABEIHEiwoMEGBgQIMACgocOHECMmFICAgMWLGDNmRKCQYcSPHycaOKCxZEmOC0maXGkRpUeQMBuKVMnSpEuaNTUiSDCgp8+fQIM2UBizaEOFDAIoXcq0aVMGRI3GVHggqdOrTqEKqIq161KtUqduteoVq1auZc3yDMqWLcqwMBWm9RoVbkSFBOZ2xasXa127D/n2dSp4MNO1PxNIWMxYAmKfbwFDlGuY6V/JAApXDqC58mXJnQ2HHvx4QAIFqFOjjlA6MuajAjYv/Qx4dF/bemnbxT2Xd9rSH1QLNwHU9WvKsnXD9V2WOV0Brx063ysgr+wAj08LF/7YOGbkm5WH/53ut/p1ztCjZzZ/nfzVxy62C3fx0zvo2O3TR3dPmH1y/cf5F56AlSEmgXzbSQCZeFKB5xmA3xEomoSDMWgUf01heFhPCUSAoHCs9WRfbfj9p9561v2X4oAnajgbhX2tRcKH25EgooVFOThhizDm1mNvEN634oNDGsbTCh0opKRCGICQ4I1BkngejmIVWeGPaVEZF5bNcekVT0kuuSQGwn2wYJS7lciiei4q1SZ6PFp5m5ddHRmmmE1ydyaPU6K5HJ3lyeljnH0KOtdjJtCoAHF7sqnmg4TmZ2iWfo4H6FVvZuchgiE2ut+jOzo6aZejPhepiueVNiOCCtanJUg6Xv956oClUjcrkakG9UEEvPYagZnFvfpRrHPeOmGt5Rl7JbLvteWsq5U2CKqsohbap7JzMutUac8GNWKa11Yrabifapvhpdt2++y3f5IboLmWoXsutj7CuxS36noaoLsR2uumvPHSC6S/2CVg8MEIJ6ywS9FN1OdLkk2EAFkPTpwfxIBJTPGEFv8n5scghyzyyCSXbPLJKKes8sost+zyyzDHLPPMNNdss8oY2yVxTiYdsDPPOnXUcEc4Ad0S0UZvJPRrMyWN0U1OX4QAjxv3xcABSMkGlqNoVXZW1Xpt/WnXhn2tNZ/XEUDsoOKimp/AlBKMtmxqT1ts27TyK6S1183/vVndepNI8JvC3gXwi3I7eh7gb+ONa+Pl8i2b35UxbqLjxwae5uCHK0W5YZavGfm4kL8r+WafDxY6pJgvq/mfnCf+6eJrAwk3qa9bGvt5qfe1eqiju3158Hn3rXjatVN6+3OcL0+d7PsibzfbxD8+vOmkT3483cl36XyguUu7u/GzSx/+hZ2j13zr2fK+/d/dm8p+vefnmP6bvev1O7XVZ156v6erTP7msr+79c91/9tb9lD3vsrFz1bzG1j9qhRAwwwwLQWkHvaEJ7oNFk975ePe9GwXwbhNcEvjA2H0RHhCWN3vhd/DVPpyBheHaQ0lDxsaRcDmo44NiIZh0Vh+/3z4oJsZ8YhITKISl8jEJjrxiVBMGRClIhIdTrEoVWTa0jCTRS5uMWJfzFgYdcajMn7KjGd0FBoDdKI1RkiNcEyjHNkYxza+sY73cWMe8bjHOdqRRHpMUyAFyUdAFvKP0hqkpRS5yEP+iZGI3BIk7TdJSjqykX6MZCIvib5KisWTkuSkJg0HymFFq5OitGQmDTlKTNLxlbC84yoJOctWmrKUpEzlJ3UZylracjKnVKUsY9lHYrLSmL/MpS9RuUxmIvORvGxl4YAZzVtWU5nPdGUyXYhLal7Tm80U5jC3ac1w9jKbmzTnOcdJTnCiU5zFZOcx5UnLdmKTntqMpz7nuf/PetozMMHc5Tvhyc+C+rOf9pwmQL+5UIZKJ6DrRGg7FfpQh8Kmm+7EZz7/uR6MVtSiHQUpRS+qniticYw1RCkVVbrShVjxpVp0aUxNGpOJRPGmOM2pTnfK0576tGY0hcnPouYzhVQkagRg2Exh6kWZNnUkSE0q0pA6NUfxMC1Xy9pmxDZQgXZ1S2QbjNm2esGyZJCE6uSmSF8IvQjRboTKW6tHH5pCB
oYQfnD1nlzXWlcBNhB0D0zWXtNqyr5a8K+qC6wM50pSwhrOsIMpq1fOGlfHZlSi4qtgZBHrO8X2h7EhtSxAIdsXyXaFsnoV7UdVCxvS6sW0WEGt/Fgb2q//utC1c4HtVWQLQdqOtLYAXKBf7+rAvM7WtuVE7mM1W1rO6s+z8xqscoGJ27To1im8FaxvIarWA7aPfCvEawuTq9F0etB6dg1vccd7T8w6M7gcHK56AWvc3k63odutblmu25TsLla65UWffr3CX6b497MAdq/9BtyVAi/lwNHdLmgJlz4HKwXCAUuwQWHHXL0EFSQ23CoOL8ZUMDrVxB8OiVGvSikiTuinMI6xjGdM4xrb+GMplohRo1pUikRVqU8tsRhPPGSoUrUjDTCIkpdMNa1hTQAs9gpXA0zQg5IorFZTSAPy1ZYEWDgAGH7RhLlLXgX+BwFcZouXnUtA6GZY/8J87ROa0wyUNROXvuy97IY3ujk507nOXw7zv8Y8Zs7N+c8cCrSbxaxhK/d5iIj2iZ3nm9j6ave+q8V0a/0cadMo2tL/hXN+OR3pSbvVfAncs3mpTEFId9rU93lrnvGraeCaeUCHRjSspYTq67E6oqoWMKl1/elZZ/rX3T0vxzrtaTZjcNGDbjQ0vdtDZu8aXL3uILLLHOwFD/vP125Xtlkn6lpTONfgLnaqHT1tc78Q3XQOt6Vkve52b7u9gvt2vNXtawV79d7U1Xea5S0teve72//2NwoFzmWCo8/g2lZ4sgE+Wobny+H2gzi53V1oi6srATmGSIi9NuL/hPwhXf9EsZB1tuIhKuzlME/AjWdO85rb/OY4H9nJHTJUp/X4qFEDssqXSvSnFs1oUKNqk7f65Ch3ZcoS5za7LYXlsFHF6Wbhd8QRDuypZ1a4h71zpY3dWI6XEHfgPTUL681ne0d9uWDfrNg7C2oEl5vidO1wbp1tVmjDSdpt153ercv3yfr9t4gnM74fHd+wU5ruZLc11yd+a/TKV+3iZfuq365nxn8wvZhfr+bfy3law/fzl4/1uIFn9lHHvblzf27dI9x6dzMYK1rfON7LvnsUDX6/hT/t4RXfebdPvrC/J3DwYzt8Qsf59a9d/m6bD/jN5xv6e499m2f/5tr3nsJtVf3/2g/uddIfH+6Nl/vjZR/5xHcc+4TX/rO5z+i7lz7v8C/LzmWiEAdIz/+b4QAsZRQpV2Qrl1KdI4AJmHMM2IAO+IAQiET7BwAFyHJEhoATWIEYeIBBNIAndYE1xCMHMIIkWIImeIIMQRUnuIIs6H7V10kjwYIySIJEEYMzKINL5zVNpzUuaH9XhnVXMVZek3us933EZ3qV5z8qFHp4NnpVZnzl5235p3zy13f0F20+GIWtln6wt37b135HeGz3t2lT2GDSh13Ul4VQyGFliHtn2F9p6H1j6HtteBVEyD9yeH4BV4dOcYcGlIdauHB82BR+qEFG+H5cGH1VaHhX+Hdq/xh4X5eI2eeF8weGzkdtEpR24pd55LeGkGh+nmd5jseEY+eECaeHSHh9khh/lGiFlviCUriKwLeIwteIPQiIbCiLVNiKjPiKjxiJqDeKmyh6nfiJT2iMWxiM6keKkGeKXeeJ1heKSgh6w9iExRiN0AiKuaiMfZGB/fd/sqGA3giCHUiOLTWO/qKA6RiB7NiO7viO7TiO8lh0Qxdk9GiA90hGjtIA/NiP/viPAMl7qCiGAymQgGSDN7iCNZiQOOgo+MJlQxGGkheIlFeQ61F1cyGEZcMjD5kv7IKNyPiMIXlbyWeGn9KR6vKR2jiSUpeNsciNihggKNktKnmMIHmTK/8JjKK4jPcxk+syUrd4iGfHPO5zksxWk6dIkS3JkugHk5MYIT7pLEgpkjhpkzkpbIPIFBx5lEApkRPpkkm5jTvZhVDJlRIZlHOYeLdnhw5plrAYlky5h7pokjLplr94lVb5kmMZk2XZaVNZkUq5eGApiHPphkbpl115ibZXkoZZl4h5ll6JlhYJfkXpmJH2l0tZlXCpmYTplKzYl5eZmG9JlVhZmGx5mKEJmYpphGvZh235mKMJmINJklm5FFsJm3eZl5tZmp45i5aJaJgpmHGZioEpl73pFeMYc8p5MDXggULlnM9pjgQInSBGnSq2gNgJj9q5ndzZnTs1j/lYjuD/aY/kWY/mmTEiyJArmIJboZ4KGZnwSWgI6Z4juJD0WYI5WDY7uFWSWZzE+ScYiVVXdzZvaGBxKJSuZ5quSYvMZ4vxGZvIV5ueU6APdqBp+aCYaEKayGvjt3X+SZAfSoYKSogUemEWOpkYqmwItITVWIrXiJe7qZuduZdP2aLN+KIyKpvDiX8jqpUlCmYnGqJfuaMiepx0yYzs54w6yplLyps0+pk2mqQ4GqNUSpp6OY2px6Gc6KGzmZkwOqNYKoxaSoxcSqRDyqQR2qO2+aOC5oi4aKaUuaHY1qG6d6GIaKSNiaRfqKRemqN9moxP6pt6Wol8KpxoaqhOGqY8OabW/1imh1p8cMpWlTmorliokPqo/yl4EhoAhYhWb4qpPIqnVzGOAIhXpWoY4siB56iq04md6+idsBqrsjqrMDOe57mB4SmerPqBtoqeXHOfJMiewFqC/dmlpDSf9Gmfw6qswJqfYrWfXlOskRqgZaGRYjWgZMWmQWqsl5qoS+EBAuABRLmpnVpZEKECFpCu6moBG4CoftqUS9ECFaAkFVCvJdAfkPUCE7Cv/DoBJ7CgvFiLgcMDC1CwBluwGuCuVUqbTPEAqKEkqoGvFUQDB3uwMQCwlNqLqZYCFXuwMtCt72qcSpEaEJsaD3AufTUCHXuwEECiDDp9DhoRKrCyB8sBmf8KqkUaABVAsgohHKcap0sxATRrsBRTrqnlEBowtAWrAzf7pQwbABmgGiWbGhUQL3V1AkpbsC/goy+LhjH7EDKQtQXbrgYppJqBAlLbs8KRAYhzOjEgtgswAmvatXD4tQ3BAXC7ADkAotxacUpRAsIxtalxA22bPRCQtyIwtwHboOQCAxQAMhQQAh1LttIaiWibtgIgH2yrPpIjAhUbAo/7MRQgrpyqrXZbACJDARWbAnwbqSkStaqBAQJAJsKBAoUrPCxQsaEbMhNKtwZqtzAwA3eiJJFbsTZbtn0bqjubGiCQJz57u5/nuQYLuiFDukZ7XA2RtEq7A00bsn6rFA7/oCgK0AJWezpYm7Utq7gZK7AJtAFKm7Ddu7BpqhQOSyMoG0BCO7Q44LKLC7OBw7E0qwIgK7/wGgA0crLlK1wMoLQswL/ry7iphrcrC7/xa6WAGgCAKx82ILFgd7grm7gOHKV7Sn4bcAEmfMIXcLwV3KRXGgAP8MIw/MKbm8DpBwE2fMM2LLchzKgu6qhO+6dguqKiirEiTKhTasFI/LSBOq4sysM36sPe27o6qajfJRukCo4BaJ06Jp28qkOv+sXXoYC0OsZkXMZj3KsWuH8aqKu5uqpt3KoZmJ7DKqzDWp8piqBshKzuyaz36axWA61lU7k/rLBgBYRZga1DaLq+9/ip3lrF1OjEUgrFBDzASTy/
S3ykRVypR8zCk7zClkzFfJnJGrvJQMzJlVzAQpylc7qldYqid3rJeSrK7EvKhGzKtvzJqSymq0ymrWy2d5yEubyou9yovZy8yOu6jHma/eu1i4zHpwfLyvzA/mupnlzLpYzLjqzK4kanRWinz6emvbvMddvM3pyhaNfEw9zDxRyprwzKNQrJIyzJp0zJt4zK2azL28zK3ezK3zzEO5zOT7zOOHumgyyy7gyl8GzE8lzP9HzN9kw/cprPvLzPvtzOwUyWsgzBtNzQ1tzRBn3RHqZDp0pfI90XqfrGXYzSNdUm6hjGAQEAIfkEAAoAAAAsgQAbANwApQGF/v7+RzyKAAAAAICA0M3cOzs76ejp29nnyNnbAHp6/gAA/osASkCI0eXlQjZ+j1tUrGg9lCJPL2dnaEpxyxkZyXQnLAAALBgAMChcox5FChMRZjJze1Jjjjc3W0V7qgAAq10Ap2ZBxhIq2HscU1VVbkdHsxg4AFVVci5qpSws3AsZnWFKu9LS4n8VAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLCgQQECGARYyLChQ4cMDiBU+LDiQwYIDWrcKBDhAYoWQwbAKOCjSJEkOaokiLDBgJcwY8qcmcAjyJMXV6pEiFMkAZ49LWbUuVMAgaBCjSKtOJToRoQIZkqdWlPpUodONQK9yvCnAK4Nm2YtiPAo2IVlz6IVMFargKhT48KsavZsW5Zf1XpVG0DsXQBp1QY+6/cuVLmI6fL92zHv2b2C2TJuXBfsYMuSJx9GHFexWs2OwUImnJnxZa6nrxZuu5mzVM92TYfmOhrzZMp8Uy9dPba1a5mwwYLmWxt16b+6kSYPyjur799zl/ccrner7dvScWY/2dzpc+gDgnP/pf7YuvHbgK2Srnwe+1vwwLeLJC/avOrjhtVjZn8f/Xfo4l1FH2327YYfa/oZx5+B/r0HX3QJCigbcQUqd2BvERq4oIUNwvVgePKFNOBVxfXn3obMZcihex4+GOBSIy5VIoMn5qZiih1++NKLSMWI1IwrgoZiTyEmlaOOPAblY1BA4lhjZENqd6FzDn6Y5HQTVjebiULaGCV3U3pXpYtFVrRkT00SGSZRZT7UpkPdsTkmfFfidCZOaUrZ4JcivRnWmjr991udJ915Up5g7umljUda6SdDjBlQYU8MIDCpmgZMJulbN92HQKcGZhopVAmUauqpqKZa1aeRIeTqq7DG/yrrrLTWauutuOaq66689urrr8AGK+ywxBZr7LHIvjrqWwQ06+yz0EJ7wKYIRGuttZYKIOpfmxpwwLXgOputt+GGO66mCJFb7rXjfrsutqCBGlREE52VknsmgUVSvlzdCxq/V+0rL6WG+nSpnk+ux2jC+y2WZXlb0tgllAtPrPBnD9cXcZCy8RnSo2spSjHGyG3M5MGJMqxgxR0vSnJ+FJqspsgXk0Zzwy8jGDPLJXuclM9MAboSyH3diGXJO0d288pKq3wf0A8VHBKifQpdFNRwGo2wxTjHhrSWPOeH9Z9jMxRnoFqDWfZCUltE9cdWc0T03HE/lXbVawfQdkVvG//ptIZhI5g30Xs/1HfQSz8dOIaD3y1ixgTKvHXLI9v8t4V5F+7Q4W7W7VbjjScOeM4YJm0510yfTnnNwkFOIspVi4754lSC7vDXENMupu1No6446VSafp3vo6ves8tewwx276t3PXzzqbeOu8a6y8m78WIjL31bm6pVKexwb3sXtQOrySpp4nMPVflSno9ZsvDHL//89Ndv//3456//rctW+26004LK/6J1rmWpa4DiSpe7ENisAnIrXQ1AgAQnSMEKWlCBDBRXvOwlkYTY62xDKwn7UGKTD3ruIAJwCZJKqC/NNYRzWZNdikLHIh2ByHEWcWFXwOc34s2OeSVrEZ3/COc6GfEQcZebYfXQJkTwEGo+RfzRETsnQzXREDRNBBARpxe5JYbwes8Log2f+Dguvk5yKfOhEoGYnywOaovKyx0bBae99mBxjHDU2fKwR8fKhbGNeMShmaJ4MjTGLolW9GJR3OgaMuaQkGiaYgwRKaUryoaRnHHkIM1oREPCrYqVVKTc5uTEPJZuj3/sI+vseMlAZg6SeJLkn0CpNlHaDZOJMWXwUMnK4/mxl4BcoSCjBstDydJsJyTLMLNmSTEK85WclKInewi93/ERQ7iUiyaJGc1C2vJzdeRSK595uzhSb46MC6fExumoZTZEhwuB4SwpWUt0UimbndGlmIQH/0xVOq+f2HRlOcfSPXtly0bpI+j6IuM+4yQ0K9RSlURVtVDS7O+iGM2oRjfK0Y56dFj9s6BIRdoAao30pBRMF7q0tUAGtiuDDVSpAVuKwJfCFAGgSYANG4AQnbazQQBbisBMCNQRhmSoLZSNTz+UraUOMZl4ASNAa6fOHinVhk0VJi3x9k0USlVCJXMqfLL6U3py1Z67q6qSrqojspIJqgOhWzOz90uw5kes4HHrU7cKt7n6M3rjYStTe6pVs/a1q8r8KowE+yC9lhKuuKnrOn25SrsiCK/QcawWIZsexXKMsv+0LIYw+xvNvpGzckVsVNV6tLtilbBlVWMi0Wo91v/aibFjhe1b+foz1cbVnciEpmvbqtu9Gra3tEWbZ9ca1tcKgLSZBOHVbJvGahZve5d1LnRziVrgrsWv6ZSsVZtL3OcWVrahTO4XqQtF8g7WvLG17g+vSVXxMne4792uNqU7yuXO7LhBE252y6vffHbXv5MDLWBFS6UCx8W0jeSv3RBcXQVbE7uj1e555btG+qbVvq1V6HMnSuJSNbXEJJbpAznFUKOGb6UN9ZSLk/LRGtv4xjjOsY53LGGCUAumBAggs26q4vFhkMgsBbIDjaytCKIUpUfOIE5lM+OLdLDKEOkxCoOKFKT2S8vKVGE7uTwveAZAnsjkbYB92xh8UkX/n3LipzgtfN2pisnNr4Ez2uQ8WbpWts8IwjNN9BxCPn/Wz6EFdEDJCbx98nLOiF6wou8p0EbH+dGT/vCfDx3oSidPj3L0cG1BnOBgtlPApwx1KsO7aSfdkdGfTvU5Ra1c9n6yhrDG8C5VbedRt/q/rz71QGXdRfVOl9QVNvVbUb3rWa+6vr8udadzHdhuRnKaSERvPWkdQkHHx7t6K6bBsE1FAHcOvJSmNoMdzWtI//XCvWaip3XNbmfHe73IPmSwlz3sZheb28eOdrKnLWxL7xnTnGZ1ohOe7oLH2t9nZHNnbU1NZzqc3pdud6Z9vXBXs5PfBldJQfV10Mg81Cnk/2uxyWGM4hSz2KI8jrnMZ07zmtvcVf0DspD9J+Uiqy/JSD6gS30uYqHXNMou3aC+rkxUfGG5IV4OGJjxQmZKsbBfZkZzyMzNTImnNuRFMbTH6TzfZ2u64yEmdsSN3V+KZ5vDs314vf9tdo5LmuFnv/t9QW1vdytc78CGe3rlnvG+b7zW+b61ts9K+INrHO92h7ffoY3221o7luSe5OIPy/YJu52b5qT7vQNe+YH/XfK
LvbwxMz/PzSMX4G1P/CNVP26vg5tuaj53v+e+dth7XvZvJ3uHMe54w0Me8QLXt+C3TfxCI3zskUb98fGd/DKGvvd1R37plS/8uDc/7M8PfP/3B5996m/f+nwX/eTzLn3ov7vO6y68+g9vfsBL+/TwT/31O2l7CnM/+vk3XvsnTf33eZq3fIz3fRsxcv1ScugDY0/3JzEWKhCocjB3cxiYgRq4gRxILE82UiVFdBAlQEG3UkY3QDbVc9pigjSFgkhXUzrCU90VgQvhL7JRdTgRdUI1db+Fgyehg11WXg1SgMBXbq63ZmoBHBKwhEwoAXiVLUPYeeBUhAc4fszHFTGRAAqwhVy4hRQgVlDoHkRYfRUHgGWHhTDRAV24hiUgE2FoZjwYWWQYfGY4fGgYHmuYh071huKmeAjIeb43hXP4EDCRAnm4hikQE3xIe35ohQn/OHqxN4gO8RIScIh5KAEwsYgD6H74d4brF3kBGBThQQGWuIZf+BKamH6fqH32Z3qU14oiMQAkUIp5SAKoGIfpMYbnV4bv54lLMQCVuIYfYAEfcIm32F26CIuNWIfeh4Zq2IUWIAAWsIYdkIm4eExbd4S6J4VeZYANsSNr6CpruFSpqHb0R3rKyIudaIdXARMl0IXiyIVtaI3IyI2J5Y1pxnVkwxdzQYpbGI8KcIr0GIWB2I1U2Hp/+HpgEROz+I8IwYWYqIjXyHr5qI1dZ4+rdZAMIRMdQAEU4CoeWY1uOJHJ2H7ix4zkd4cy4SoPRpIY+Vv+t4y9yI6/KBUsORXl/whx5xiJu0iHM9mM7SgVGiAAGtCS9ViQ96iR2ZiQSHgWU3ECGnACRkmQ5YeOJnl/r3iVJ9FyqrJkP4dQFUgaE2ghJ0cUKSeWNLgWHbiWbNmWbpmBOQdTO6dkIohyL+iCQKeCZakT3dKC/5OCSUdlHFQv+jKRPkhCItR0/5KWQFhmfeg22Fg0uXeRSJmRkvhOj8k3kTmRMamOWRmKaaeTBPiSctiTRsiU29h4zvd4nPiZvjh9VgmalreJ11aSsumK7Heb7UWbmGebr9maufmbocl7/EeaE6eUkqmPwbV78od9kPh7l1mRqEmZCmg3YneSP5mSq1h/WrmbqliclQmT+P+4lI4IiNXpFteJlcFJk7DJk+m4Sby5er7JnsAJisI5m985muFZmu95muWpkNUWn7VnnF+3n8cZnWyTmYazmQc2nslpkfsIdqOUnrhpn/SJnesIlPqXn95EoLf3oZMZoaoJfqyJoa55oeppoRoqgBxamx7amT6Zodq5oeaon1Xpnt0pkzJ6hfG3EgwYMA6IGXvpoxWFGWOZIkMqckVqHEeqJm/5pFAapVJ6UQZkglV6pSuWpBzRLVaKpeMDGg3iHmEqpmQKpmWKHvkxprKhpmt6piXDpmiKIXCKIHNKp26apncap3JSp1TCp32ap3IKqHpaFH66p4IqJoVqqGY6qH//uqhveqiK2qaO+qiMiqiQGkKJijaZiqmXWqm/tamjBKqh2qmESqqeOpGmajeiqqqp6haeaqmTaqexKquSWquU+qqlOquNaqt4qqu7equ4yqq+GqnA2qu8SqvFGqzKtKooxKzN2qrPOqzKiqrSqqnQuqzXihfKmqvH+qvGmqyBWq2cuq2umq2N4azYKq7cCq7kSq3d6q3I+q3yGq/keq7mmh7oqq33iq/7qq6j+q6wCrDEOq/hWq/66q/lirAJK7DWqrCD6q7sGrARO7D0WrAGCwBc6qVF16VZyrFMpqULWJdmOaUkW7Ime7K/EpcZNJclqLEb67IjmJdDJ7M1pXT9/8J0hdld+fqpTmdCh3lUWcegO2uvDpuUCBpujAiZFEmeBAuvFouSPEqjotmhBgqxE8uKOTp7Ajo1QruvVgu1j9ijq2l89dmwDDuu0ymi54lCFPp/Feu0cLueK7p3NUq1NyqsZ7uuYGueAdqivemhQ8uvELqcEmqd4ZeiZnu1aPufTbm2ytS2Ohq3FPu02Rm1LFq3Llq1Ouu1H8psxGmjz7mwiqu3lRu2Uvu5dhu60Zq3/5q2hDuiE3q4FZq4TSuxewugYkuiZGuitvu2vbujpnu5U5u5dyu6tTu5Jzq3w9mc4Fm8qzu6rcu4qem4eAG5nvm7kku7pcu3uRu7JYq4i/97vNoLvNx7uswLuttJupS7vsk7o8I7skv7PUtbNCCrERnbsTBrly9npGlZNCj7vwAcwCWrsgzEsnrpsT9XvwZxvx/rl+8yLk72gSJlswGDs1+2uUV7sDfImC0xRkE7v1/ru8grt+3jwQq6OV2bwTyrwvzpKSactJoJwhjMungrvVnjbVn4wbYXuJxpIzg8FzoMuJwbosj0wzsSxJrLww3KUC+8tUq7w0OsnGthxOGBxM6brjRsvNubFFScAFasulgMvTV8u0HTxV+cvtErvuG7xWXcxH4rn0LMwoLrulPsxpj7t0kcxYNbx0hyxjs5xiI8vu0LJmZ8wi+Uwln8vGr/HJsWUsgwvKAyrMRK3DiO7MQxDMVy3MNM3MeGvEORrMeJbLRGasfDi8dXrMFirMXky8WkjLrEC8aovMhpzMadU8lvPKB5nMlLLJatzJfYKL9gmb/wq8AFwcA/16Ttw5USJcDM3MzOvIEEjEAGPLPE7GMi68srKMzYfIJ/eZd/ScFCZcFSN8OprMgI8rMW0ZgE08nxhMjlHMayDJ2miRXsfGbuHM+qnL3cqZvod8dwnMuhHMu0TJ1968+4fMorHNAJbcNqW9Cl/M8ITbQKLdEM/brU+1vWG6P6rL4bjaP8rLW3zLWfrMuT7KB+3J6AzL7YS8KWS7cPfdCwvND4bM6r/9y4Du3KphzTFD3T8DzQDd29hvu9s7vGKj3CKuq+Lo3TEK3Tc/zOAl3T03vT55u6aJzSHW3VLB28ST3Vr1zV+bzSRo21Hw2fIf3EcTzRTU3GUQ3U6Cm7blvUggzWYn2fhVLPWvegaB3CUE3QbM22bh25ck3UVy2I84yZj4zCI53X5KzWfG2+RCoADqAlkQ0WDnDNj13NA2HML7tia1PZ3uXZzxzaoj3aNabNl23aSprN+LvaH4vABPUvBxDbsj3btE3bmeIRtZ3buq3Xg03Tfeotuh3csp0RwC3cwQ3OXSbOO7jYgSzYYoLOFaHOOXjSZevcYW3dc42idX3Yh5zYTv8t083NyNrtnQYt0pis2ET8XcxZfPOH0l993Ryd1eX7vlyd017t24Ed30fd0svL3s553z0N19i9z3RN3i9t3mf93Tvt0xYt1f7dvEzN2/nt0QXezwdu1gCt4GnN4OpduG0t1G/d2wEu4pZZ2JBi1/cc3rMs4OKtvPhZ3hge0RvO4is+yFrd32Pb3tWt33EN3xQ+3hau1DAN4E9N4uC91z/t2Dn+33/83j3+5AQO5CAN45ec4Dxd5PKNu0quuzrOuz6O1V9O2P1Jz9ztyeet4Zp8tNTt5VDO4wPe4kiN41uKEJOtMXV+FZ6N2QKh2TGr5xhr2and2SAD2qRd6IZ+6PT/g9pzrtoJ7Np97uj66+d8jnKg0QCWfumYnu
mavuBGzumdPuO/bdzCTdyifty4BR4yCMpo3l3QfRFXFzA55VyqfuVHbuPzvdVo42A4GYcSHub4vd83/uINJuskPescXjSeKye6LhU56esjPuFgnt0uvt341Vi8ztw0Hu1RPu0GLibLPhPN3uY1Du1Ovu1xLuzeTuzobexI3uB9rUzfPpKQ1eviru1wzt/oruzqvuqSLMXIvt4hFO8SOe/Y/ulpbuIJ6l7WTvD9rsswCnoD9l7sbvC7jPBIW+25xfATT+4GqeannlnX3vCK/fBkjvF5FfIb7+wlPuaGbfIgr/HF7vAm//3xpYXyMT/yM6/wGZ/y9V7u9x7s1B7xC8/zbz7uwH7rch6yI6bMqMICgK70kv70UA/p8Cvon43oWJ/1Wh8sij71rN3oXW+/Ui/2jA723ALbpV7bt10SaZ/b9F70qlrcbT/cbCH3c99BGHv3s43c86LcXfb2bl4UrZ5liZmzgw91aw6+gW/02Y0BAoABQB/5QV7fS03ktc7xoswQEZABrpIBnr8Baz3WEE/lkHzmtN7CEeCQAsCFKhD6FT7lF17lGX76B8oQXAiQChABjf36ZE36iG36Ki7PAZABt/+QXbj7Ut77sV/6Vh78hO0A8Gj8XGgCSc77o7/8v9/82U7YIv8Q/au/hqDf4R5f5u3s3bSf5igQjtLPhSLg7qJf8mUt+zIO+JHY/d5/iOFfoBfdGBntn+e/ywCBQsHAgRYEWCBIUEQAhgIEEGAYUeJEig0fVsRYEcBGjh09fnSYUWREAiFHinT4UeXKjSlZvuToEuZLhxAjmkj4wcKHhAM3RKx5EuVFoRhnsjRZlGJJAUoryjyqEmpUj1OpxiTK0EFPrgpMSAzqdGJYsRGvVm1almRStVbPur0KlypZhhm6Jhybta3esmc7si3LVC1QAX7RGv5bGHFLvgHuDoyQ1+Zgun0RAxYreHAAuVE7H/08s3KACHdVUBztNLXSxZidaqasuLX/7Mu0DaeOkFt37p+SN3Nu7HT2b9h7FzM+DiA0zNVFmwt9fnL45uJll9O07fc60uBKo4/8LnL64Opit688LzX72+7O20N/L92wAYcOqNcf7MChgcX0BfBHzD8A5wsPI/3iE+nAyco60CEHH4QwQgknpLBCCy/EMEMNN+SwQw8/BDFEEUcksUQTT0TxwQD3649FAv9rEcYVZXxxQL8EjNHGs3SMS4ADfgQySCGH5M+hIY9E8oD0QFqvx+SWPMyAJKf8MSUpqUxyNgYGY0BJAbZUiwEoE3uyybnM9MxHMMsSU00ux1OrPNXQBI1O0exkDk/sFrSuPQwEwEC82ohz7QEI/w5FFIIHUNOTuzIfPa5AjFILAQINBNAA0QkogjMwzCZYIFRRRfUgL0hnO7U2Ps1rbIVQHRR1BE4HvS8tiUYYNVcITI20UfR8Va/MVefk0wNRYRV1UYk6zQywB3KFdlPCUr0NWCaF/a0yCI51KNc1A2D2NbYYgBbaCsCy9rBeqdUOwaEWBJVbAXINYVlayWMrhHKhVRY4dtn7t8dhvdMLV3mhLZWhcJWCLd59c512XYlR7XVg9yZ7dlRkRz1X4XvjNOlPCS8AgV+LAj4T5TQthm+yCnLdeFQwFy5KswovyFXWkyeuTeU6WT6JLA5yvUAAnHU162NP0xI5QpJz5SBiiv+n7rnibPV6WVQQnvY26WoJtZWhh0XteGeqq/X5TqDB08vhh/sFV+lmww5g6LGlNbvqs9vFdjNtx25hIpqFKi/rcutFN+08Fd/zaj7JfThqe7+ulSK3u06cZ7Q1b3ftdynK2NxZKceXboY4eCB11R/AW2q9X998Ns8zSm311ROevF2wr2bcUc7Zm31SdzMa/CQ5Ce791+TVC/6p4Y2SW1zT51z+2t8Fdvy3Ge0rnXsGXawxR/FnbH4iBTdrMEX112e/ffffhz9++een38IZeaQKx/DvH39//28E341mg6UkFclHBETSmLBSPXWh6koIFJKVIDgkLXHJS99ySpsYSKb/653pABhUSps++KboMcw1yOtgmjaIlfL5ZjPFG8nx3LNC5KTwZ9l7YQlrdsIZ2vBONFTO85zXwtyxZ3d+SxcH9wYwqyGRiF7TXeWQCEQF1lB2OBwMDEUiQ/hQMYkLbGJsnugx0oFseihcopN8mKcxAqeNWswIF4P2RSvCjm9h3MsbdUg4HnZxjdjxYhslpZE9Gq+Pc/RiILGoFjhiRI5sS+QfuSNIIY4uiqXjnSSVp0nmLdIyZVxaJtOYMk4yiZJ6BOXcRGlHJo5yZZ4USyMr8sihRNKVN3Si9goZw0NCspQNZCX2cpnDVEpvlbELJimvOMws7nKLvazlL5WYzFcyKZORzowjNGlHxyDaUlWwFM6LvLe0cTrlQPiLiv7+t6MAsrOSEjlffgICACH5BAAKAAAALH8AGwDeAJEBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ58jZ2wB6ev4AAP6LAEpAidHl5UI2fo1aVTFmZmZKcpQiT5lgS6EeRaNkRK9pO812JCwAACwYADAoXIc7O7QkJAoTEVhDfWYxc3xTYqoAAKtdAM0XF8USKrMYOABVVU5XV3IuamRMTN0KGdt8Gh9vbz9fX19PT79wMLvS0gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAEIHEiwoEGBAgQwCMCwocOHDxkcSLgQokWIDBIe3Lgx4YGKF0MGyCjgo0iRJDmqJJiwwYCXMGPKnJnAI8iTGFfqTIhTJAGePS9q1KkyIYGgQgUcRQpxKNGOAhDMnEq1plKmEJ9yBIq14U8BXR061VrQaNiGZs8GGEt2YEKpVOPCtLr0bFuDXMN+VbtWwN2yV9WmPcv27lu5iOny/csSrNq9gv0yRhiYcOWwhdseRhxXsdrJlPlCtgwawGDMl7tmJruZ81TPdkHn7ToadenTqlNjXa21tWuZsMPedny2du7hdVEnPy476u/XuLEOFz17t+TJ0ZlmR8r7qe/nL4N3/53+uLr264y3B1XfszvR7+DFS29Onbjt5sut5z8/HC74uezhRF5x5nGH3l8BnpSgSO7t5Nx/AOrG1IB6FbjegYZJaOB+BvYHYYQcBkUhbRa2h6FmGl4YookefjiAfBPSV559zGGXookr4tTgSvA9ByNSI2JlnHXI8bVgSDsW9eCHP4ooI4E0EomfkTfqeCJrS0LYZE9BMjUkf1NGlqOCV/aW5X9bCvhkhVGCaeOYDFZJZotMHnlRl0h92WGYlsGJZJnenRmfnRbhGZSeFxYpJpV0akkoRAZMZkCJJzGAAKVkRsrYpFHdZB0Cnp6n6V+cIpDAqaimquqqVoEaWUKwxv8q66y01mrrrbjmquuuvPbq66/ABivssMQWa+yxyCarbK2j3lUqAdBGK+200x7wLLXYUnupAM22xakBB2QrbrTbgjvuuOVKmpC552ZbbrjtYovAcKEGJRFFZ6XUnElhkcRvV/ra+C9W/tbbEwOG9oQoi3wqx2jDufl555oktrnnm1RKbFGSW
8kZp8ZZUSwkpnEq2ufDGC/6mchekvynyQ5HBnPEi7Gcp8tJzawfyul5/CfIDyWM08JW6nwe0GIB+p7PSSHdkNAnET0nxDvLTPXRNdtYH88IMr2x100p7aDTDD0atM2H4vy10RtynSHZfcENtU9qh822im6jCLfZDs3/HZLUJV/dttUpn7yy1jPmjeXeYD/EMVSMy422wnU7LjaPjSfN+N04Zt3z1oT3HLniZo5++OeJh9616aQJjvfpXYPeeuExzy56xp7Hnrrtq+Ou+tu+x4Y4lKQHyvp9tNMMe4ayI3+7yrwDD71wkw9dedKcW7m5652r1S1ZnKpl6fVocatup5G5atn3WpVqsInqo7bs/PTXb//9+Oev//78998r+0+5VryqJcABkmtd52OXAQ/ILXgtEFrp2tS6GoCAClrwghjM4Loc+MB5Ned9lZqIQvL1uIPYhIQlASFKSogXAbjERVYZGFMQVr2oka9sl1PS8WrUM/84KnNPqyHd/yyWKO5pr3hL8yGa+BbE4bEJiWMLnvO6psRBAZEhfrsI4F5mRDJtz0ZV9BETsSjEv92wL9nzIhQxF8bfpOkkWbTIFnPWxY+tUUltdM0bRRJHiMxxbXX82R07lkfO7DEkfXzIH+0WyKYNEiqFTMwYA5BIhyzScmm04+9QFEm5HHJiTqzYI014xbiNsoWd7MwkK+mVM7IQMDuUEhhd9KJVllGLrsxhx2Lpph7S8pOFuqUcc5lJQW4SS6msii1DOTIiMix5VYseJ3+5TNQR75ilkyIPqUjNUrKSIZfEXiO/9kVfwrCaurumNBenTVmas07eFKYfiTnOsJWTm+eMJzNb5v/MotXTcvfMUDKho09rPhGbxmtnL/EJT7gBkCjhy9e2qPRQnbgvfSqMU0VXUipWeZRVb8non/xH0pKa9KQoTalKV5qsjaqkVBmMaUwbAFOZ2tSCCJRgAx/IQAV2MKek2iBPISjUoXrQRgmgZQMSklR4DkeGSCkYCp8qUotItV/DaeqHtqXVJeoScgq92PMMJ7yedfU/XM1nMR2J0KXxEkjNOSt40urUf2rulLAMq5OQSku6/nCt5MRrY966V7P2lalqtWv5Aqo3vXIprocVgFzd+MrBOtaf0MTa8lA02d/41auAtadg3VJKdGaos675rBVDC9DRUoawj+Wri1Qrxq//khK2mB1r7agn260itq6ZHdw6szm98UB2tr/9q2JxyFh2Fnc+vYUQbSlr2xbidmrBfV1Zu4ZazkxXj5Ul7XUDl93ubfe0ke2uJ8P72stiV7fKOy9n05vY8h6xrVF8boyii9bkgna5psQv5saLyOP6VrL1hW80pyg9svLWsMhFMHAVrNnhJlS/cOXvXP27WgBPkr2mKW1BuUvfCfcOw0W0rxq9d76PuhhVXH2xi4HqrJBiNDIu5chFLRO/3LD0x0AOspCHTOQizyrHGyngAq31lqESIIJB3alRi8pTKNeYWxS86U2p3EF65UuEVcUIiE/YLzIDbMwu7CZU7fXNAISz/3ysvauA8dhNye3zZv1874kdvE2B1jl3zNsdgxuL4me+84d2Nqgo57xL95L30F5NdDoPamG3OpqLs8ynpAOtzkE7l8/uZCiiAY2i5vWZ0KBeqJ81TWosmTrUDd7tqZH55826WtCzJm6qxSrqSLfaTK9WNaplDetpstrWwMZ1sT9NbGHT+tjyvXWnc33hXacY0lbcdKmV7WxdN5vXq27orwMVbHAPO76eNtNAaWLabU972d5GN7WTWOtoJ/vd3a72t6/d62yPe2nl5nes5Q3vQK0bOO2WNqXTrW+C55ve0H7wpBdd6fxa29D9rq22FU5xhlu60LnNOHUdej7xTRTH5//r8afCHDYkH6SjMv6ojS1j5Jrb/OY4z7nOmZVyJzM5Kk62sre4vMB3BZ3GQ5dylYluwKP2jOUOudcIy1xdwKz5YGYmGJqvjpOrAqzNb8ZhnBfr2hATGJSKbmbZPyzisTP33w4KOMYHvuB5W3zfsU07P9cu4ubGu+7Q1Tue+X52QKpYk/YmN7fNzWyHM/7vFZY4pxdu9wFfmo6HN2biAb54gZ8b8A+/u+MLO3G1MxqsINcz3SNv3DunLc+P3jPeQ7564Ure3ZQv+McvTvvPsz7wpd/76W97ecNT2PatF/zrCV98RmaerZuPe+fn7nvk6170oM+w8ikHe0wfX7sex/7/7/e7fet1H/PfN2/4LZ96PspTkfR8fmCHb93mn8313Gd++70v+9HnPfiDR395tX/o13/ZR3qT13GVp0P2J07yJ1osplMml0suZxA7hhoqJyopB3VJk4EGsnMgGIIiOIIkOD8VWBDf0nNTZj465VNFx3QDJHTgA4PxYnRGNRxeRnVUhUJcVylbx4EM4XUEg4Pv54Dpd18Vx368B0fN0WZoVnjOd4QrFn2Y44RVZ1kEaHwGOH7a1zVW6HYBloQMmIUhYyNf6GFth4YbZyZn+ICtJYBYuITuZ4ZFCGdqqH9yWGB0iH+913gHSH1+yIUIiCJtKIWIt35jmIdo54V1KHZ3/wiH4tWATdQzhbiF1hd6Sjh7TLiH5ad61Qd+C9hoZHh/lNiIaPSIYiiKihhMnAiAnheIl/h4DfeHatKKCXh9meh/fQh5sTiIWFKJtQeKuJiImjiHpciHngiLwoiJxKiLm3iMnRh7wah+oYh6q1iG0OiKgMiLyyiLu1eMepiNt8iMqgiOBTiNSBgWJ0gQKSiBKKdTHngh8Wgi6zgQF5gb82glJbiP/NiP/liCWnZTNNVkQ/VzCHB0LBhlLth0NNguMtg+DXkuNlhl6iUXSzV1AANmU7UvQDgSWTdDP8iDHckAFRkXJ4dv3ih+vbiNsyiIPSFXEBCTMhmTLDATJ5l75P9ojeaoheg4hWFxVgmgAEI5lEN5VjepgMNYjs4ojZ9IjVhxVhtAlFK5ATJxlKaXijq5lPzXk4fYFV0FAVIZll1llcKHlcQ3ikZoid0YFF3FAWEplRwQE2QZgGZZf2hph24oZ2ehVSfwlmEJATAxl8sHie11l46Yl2SnFk2VACPgl1I5Ak0lmPlHmGYniYdpiJr3ky+RAo4ZlifwEpJpfni4k1Golk7JFE0FllIZAhgQAn8JmmcUdqeImG9HmWwHN1oVlUSJAQKAAVMZmLEZf5gJfYiolLR4EloVlEQJK1KpVaFpQ+fHk02ZjtV4ltf4EF3FmUPJnEPpAnIZnNFZmlz/mZnVaZfX6RBdxZjbmRBDCZnfGZ7wB59pOZ7EWZ4DeJ4NcVYtsJ4CMJSA+Z5yt4stuZIC+o1aeRFytQEcwAGwsqBUWZXgOZoHKp3KeJpIgVqwYpIRapt9J6HHKRIYmhAaKp+tRKKXaZrUmZRZ+aEhgVodIAAdMKIBmozcaKGvWKMpippxYQIdYAIyOn0FqpJryZIGyqIIGnMe9ZwhMT4m2hf1KBD3uHLvGGX5SCZVGif/mKVauqVcilJPCgDtqJAtmEBkOqZmKqabQoS2+ItN2KZryqZvWhps6KaMGKeBoqbiSIhyaqdLQ6d1mqdwCqhz
uqd/Wqh6yqcOgqeGeqiE/8qoizqognqnfpohitqokoqoSlKpjkqpk7qplpqonQqpjyqqnIqpmfqpgTqqfRqqpOqpqVqqqLqqpgoVmvqqrtqqthqrtMqqshqpvaqqvwqrunqqs0pKtYqryHqpvjqslHGswXqrygqsoFqsqIpmvPqsuZqs2KqtzBpizjqty1qF1wqu0sqs1kqtLfSt5Cqs7NqtgKGuxBqu8Vqu89qu7kpa8Nox+aqv4yqu6Gqp5yqv/PqvjbGvu3qvxtqvAyuwB8uwCUuwhBqw9LqwE9uwFfuwCPuuCmux9sqt/uqw6QoaXxqmV/akJJt0I4t0M5iQJVumUdalMBuzMjuzw5KyQP9XkErGkCyLsi7bsi3IQS+odF32QV+GLzq4sRwLrUvTgyuUQvkCdsJ5sSELsYWJn2SEjEPEdwaLsSjqk8mnjaLJoVs7tcM5f1SoJDPKlNkarR07oENai9FoRk0qsVKrsbQZhrfHcVdZnEm7ttvqtjb6f+NIl3zLtW37t0XqknALttCptUhruNPptcA3uINZl3YLspfbtV05ubiHlDkJuX67rpG7ueTHuFkrto9LtppLnpyrt2VZuKp7uKJboTnahZQ7mZZbsKmbufRptnl7bziZkh9bt7p7twkHvJ4rvPWqtLOLo5JburcbtrmLr7tbvGULgWfbMWm7lcw7vLKbi0b/yopxi0tze4XUS7Xeioq/q3goeaNs273LS7vPa7udu7f2ab3Ee77X+4ZnYbMTWL42G8A9m3RX+icFnBQ0m8AKvMAJbLMHibMEuXQ7u7ICfKYlC7Q6awBZFpAxlYMZabRnZr7NWr362zVMu6Qt8UtQW75jy7vwa5zncXAxkQAr7Ljo+4RUIsNzUcOoe8MiXJnpo8KmKJt0+73x67xxosPhwcPTO8I+DIY94kZMDLsuHLreO7oGLMRYK7c2jLn427tho8QvMsX3W8JGTLFgbDliTMNDHLVn3Lce24yfosXjO0wsTMJO7MWRGMQwRMYqCrpxjMZYnBRr7MefG7svDMeA/2slhdzGd/zEeAzEPEbHpsvFPazHeby6hEzJ0du4l5y/mZzGHcjJ9fu6ZRzKiQzIi2ylpOy6hHvK6QvJxisoPmLIyivIVnzEq5zErYy89vvHiJzLuIzEWdzHjtzFoBzL+zvKMOS/EkWBA0zB0QyR6MNjSOpRDJzN2rzN/OjAPpezMaiy1FzBaKqQGBzOQlt0HkwwGnm0sozJIXbCFyGEM2TL7ou4zfu+8ku69OvKlUvFX5zKwUzM9dm6vmzKwFzFgazIiUugz1jJ5IvMb6zKDf22D93Jp9vEypzMOGyYlHTMnzzRA73LrAu9pfzKCR3QwszQQhq4i4vRlqzRRSzQ9/9JmqQI0XYs0TSt0gTtu18L0xEd0jttxoPs0wbNvsF7z/mMz1e8zyXdzweN0oes0Pq80NZp030D0jL9wzPt1AVt0v6MuwBN1CtN0S1du74Y1f8My11t1eaJ1ZOI0/P0yPDc1iT91VCN1Mmr1E3t1lRd0S590Se91ilN1n7N03dt1GCt1mLN1lz92FBcSjbrADNC2WHhAOIcQJkNUZttUZ3NUWOE2Vck2txc2qZ92kNGzj672qzNsxbs2uXsLE91ALRd27Z927cdKR6B27zd23bN1Lrcp+DS28Rd2xox3MVN3Os8Q+0cwi2M2MDdMfJsVR8ZVfZMpEud3X2d2Ni7vpz/177Yvd3RzdLgq7iCHdbSO9aoXNYjDdhoLbiD3diFvd6Hbdjczb/eLX3gHaTird3Bfdbzm9Z6/ctTDd3+Pcz3rZf5jTnbe45DTd9VneCJmb1Q0eAUWt8QPt5XPaHYKNfxqdPs/dcAzs8C/t1JHd7/3d8qLsfmbYwebklu/OAbLdI1zeE3DdQ5LdQhbuDl7dAujuNzDeIYPuNF3d0/Hd/p7djPbd/uHeDwjd6evNVLnuEj/tQlrt8nzt8pvuUI3uQk/uSMneTzTeQ7zuRVjtdXzuBASqMRruHtfeaKndcmvtco3uUHTt4s7uMhMdmVfRairdqw3dorSzaiTeiofeiI/57o/gPo0hzbgf7oje7okS7bzdEAln7pmJ7pmk7lXI7ndl6FyJ3cvH3cok7cpF7qvJ1VSsVev33npDTdYua0VAfrEaHqERbJrd7jAaABAqABX/7eLz1ft/7OHC0hEiABsHLsEvABRh6+HY5ew17XkE01EiCU3KkAKtDsLR6OJBbtxT7lktwQ/EmUEoDfNp7VGvYc24Lr0548FDDuRGnuzn7jwn5g7A7up+EAy8meRFkCCn7ucQ3t9k7sNB6HAVAC+96fUsnstWm1H53unsXq7S7tdYECUnntQ0kCEw7XVwthAy/t+F4ZJHDx/L7wDc/xD+/x0iXxIW8yFr+bvRmWGv9/8gDf8d3+8d/O7skx8kPJmq5p8jQ/7+iu8v0lwrne6Vet752pAP6+8TWf8je/8kY/8cW+HO/emfK+7Yso8FJ/7zoPEZ1Z7v8u9AFf711P8DIeHdX+ltme9Xq+9WZf9F7vw/shARRw93hPAQw/9lovvlEv92hf5lWL8scbKCVJFese+EN+m5YJ9VwP+CD/9R49STZ7zawCA5+tY5mv+RM8zudj6KMd2oo++qRf+sfC6J4v6amv+prd+a2P+gE026ie2/E8+7h99J+O9Oka6rYvQmDa+7a93FHV3FpH9QVv5phD61FX3Wym1epN5ov/2ITF677O7UD+4Toe/ZG9HxX/UAGw0v0VEAHPjuRR/vy47+kr+nsPsAALACvsvwAXMP5QntHmb/xp36EO4QHv7/7v/wD0Pv8AEUDgwIEEBAggmFDhwoMAHD6EGFGiw4YTLU6seFEjRQEbPR4ksFAkQwEhCVpYkPKggJQtE3rceHDkzIIyac7MCBNjR50Wc/aE+BMoR5M3RYIkGKHlgpVLF1QgOFSiTaMLDSKsSlJqRKFDuwL92hNpVoVjBV5Y2tSph4Fbg2IlS/Bq3IRhddqFifcjT7cAzNINYPaBU7VLX7Tt6xcu4LmABeqNydctZI2UL/6laxZt2oNOUzIQmFixY4GNHVv2KXkr6p2iMcc1C8Jp/wYBGTxbQNyXKuPdmVVLZT31t9fhYEuSDnx84OaUIjKI8Mw2dOLecU0DDs61uNjtd7vnVX46fAClnp1OiEp9Md3rvl1/3/v+fVHs4wOgNO8yve711quTze4t+dSbDznMGMgvpQdeUg+59mCDL7IB+SuQtNcGM+8Chd5z8L+sAnwIRI4mnMw+COkb6IEJVmRxggg2bJC0BwGMsLIaL7sxtQrFQzEuDmX0sCoRFSNxtRx36pHGJLP60bEZPzxSuCKBi1K7JYU0kSwD+jLgIAeAFOBLwBw4aEu3uhTAzK3QVFMqNrl8LSsys6xqzpXuxDNPPffks08//wQ0UEEHJbRQQ/8PRTRRRRdltFFHH4W0zzaHevPMMi1NE85MMZ0UqErXvJRTLt87oFRTT0U11S0PSrVVVw8YMtYqBSTQgFdvLbUhW3F99T3QAGMAVgF+pYsBWackjlRi4zJWgAO
WJYuBJnnrjyANBNBgpGMJ5Ja/K42Kk8kYnfzvAQss6ECADs5dsK5ZQ3x3xG5L/PamcKualr3qymOqs5SkG2hbCuc1sl6a7jUqX//6W6GlwnAjSOASkTXOYJzoxHdcahPCUCV/U3ox4HiJJJjKHeuzeCSFyboOQc5YaklDkSnmjmbvUj4K44Q11nexCgj7WMGZS06W6IoN1PmmlbMyjV+Hg05p6IH/p55YPZxJuhpG/jqE62egYV6qXYmNtBm8rN09e78SuR7IA89os22pFaSummqyrUY67dzWBpOgCZxyDrqlQKD7brtNxtvCpGlauqr2Eow54pHHRtzbvJFr3Kj2ZIM85McmB73svfT+nPQAMr/pQeY8g0py0SU0mjvTEVaa54UVctopaCkv+nDiZl98JtRpmhGEB45H/gHPC6+8bucLvpy04Wd6EsvXbbweR+BNn36k6sENPXbvTvaNe9tZDhL87HUU3+zoHeu0JzTFpJb+uOaMX6dP3QwV1E39J92cAhgpAhbQgAdEYAIVuEAGNtCBhcofTPZHqf5R8H/8u6AFI+iRNwl6qoIaHJV6ePWqVTlrhK7inXHW15pdnRBVunJhqnzlmGAdBFpZadYKpdS+vTyLhqy6YVUYEBAAIfkEAAoAAAAsgwAbANoApQGF/v7+RzyKAAAAAICA0M3cOzs76ejp29nnyNnbAHp6/QAA/osASkCI0eXlQTd+mmBKomNFMWZmiFhYkSRSpxxBZkpyznYkLAAALBgAMChcZTJzhzs7tCQktGs3W0V7elJjqgAAq10AzRcXABYWwxMsAFVVTldXdC1pZExMtxc1H29vP19fX09Pu9LS2Awd0A8j1Xof4X8WAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLAgQQECGARYyLChQ4cMDiBU+LDiQwYIDWrcKBDhAYoWQwbAKOCjSJEkOao8KKDBgJcwY8qcmcAjyJMXV25EiFMkAZ49LWbUqRIhgaBCBRxF+nAo0Z0CEMycSrWmUqYPnxYEipXhTwFdGzrVuvVq2IVGz6IVQFYjQqlU48K0uvRs245g1X5VG2DsXQBp1QY+6/fuW7mI6fL9yzXsXsFs/w4cHJZy18JtDyOOq1gt47xnHxOOLBmw2dF1K5OWrHnz1M52DYN23Pjy6s+pbefGiplsa9cyYYf9zFe06tJ4dzO1zPu2bLjAgzNnSlxv7ebITStHOp2788xRo9P/7B60eujry7/7Pq16e9DeWn+LHyC8q3nas21nJ9+TP074T8knXn1Y3deVcfoh599JC4oEIFECRkcgdbIVh553+7GnG18P6hQhcBMiZSBWCGKnoIbNudefevGFN99cDYY0IlMlppehiv+haOOJ0L0YYnkVWpefiaXFmBSODLIYoIsv0mdkRTMiVSOGJyLpoI5UFsmkj09mFeR5Q+6opZUhdelQhyt96NqPPUUZ1JTvKQkhlnGSmVSGPc7HJk5u9gTnijdySCegPDb50p4n9YnTnzkGCpmdFaFZ1JZ6mtnQXQZc2BMDCGi6ogGlZRrVTc0hQGp6oEomKgIJtOrqq7DG/2qVqZAhZOutuOaq66689urrr8AGK+ywxBZr7LHIJqvsssw26+yz0GL6FgHUVmvttdcesCq23HLbqQCp/iWqAQd0a26135J77rnphoqQuut2m2658Xr72alBRTTRWSmdaFJYJP3bVb9aCoxVwPhuquhJjCbpKGqCPtzeYl/iF3GVgkLalJweDpqjxg4t7JOnjWL86MVjZkwxeBaGmSVrHicJslgcpxnzlTMzJHJIDV8p8YaQ/Zziyuu1jDLMOa+VdF81T7q0pTpXfCDJDpsMcdBWT+yZ1CRS7XPWQI8mdHpL72xRz2U2zRHUSh+Nm8pbsyyk27I9fXPaY3tXNtc0ev+NN9hDY53yyXEXPbfgSMMtNuBkE92i0Yi/Tfjig18dm9xg0g2e3XbnXafjS0JOeeKTH8e43qDPKbrplWs9uuSWD8e3lH7fefrnkdetuOyYW5z75rsn2HrYvBue+e/rca558sEXOPubtUeqNlTKI99i9ZdrJapanEa/cbjSjgoZraOB39aqCa9IvmrQtu/++/DHL//89Ndv//3Hho9AvdxqOy3/12qXqt5FLwCii4AGtJYAxfWuBiDggRCMoAQniMAEUgsB9+KXRBLCL0mtrSTpOwnCOjg9t7TEUE4CIb/MVhG02W54gXvdc1CIKJGw8CEulJ7nVtS5QhmqhjJ6np//vHemEhqEbUzroZbyNCAk3tAhOdzYDj+2vBYxUUJOFOKiiEizKcqsiku6Ioiy2LupuSxOXsQZGOckxjWR0Xi+kyHwSic8mLVxM0C0yBMbEsUipjFtSrQjDd/4uMPJkXl0JJIgf0jI0BmSdaSLXR0/c8fENFJ1j5yk7hIppkU2KY9Q0iLDuMgQD1KveZ2k5CDvpkdRjuyMhIJh46wXxlXurYxdg2XJZIk6WrLRlqnr2Oo0OUdJKlKVjGRlKHHZN11WjZe4O6QVgVm4Qh5PmkvCHiSR+clLCjOTx9ykMVM5w2TeEo5mXGPHtEnM9VRSLqD0EjNp58yvQZOH6rTZOznj/02VbI9f3xKU+ciCvvGFMEkD1d5bZMVQWb3loFfCn0QnStGKWvSiGM1otM73lgl61KMNWNVHRxrBd7kLXAVM4LwseEGTDhClLCXASlmKQdkkAIUNQMhNu2nEshiMKSMEmClN+FOkBHVgn9lpk76l1Er1lCXsDGcxXVe8FjV1PkxN5h+PlE+noVJENkVhVnm6VekFEnZUtU9YDTVWLj11Msrc2FnFmVbngeeq4mmrU8sq165+MKoUuqtYdarV2+HTl+v8KpAEy1bCktWwVESszQALVsYu1bFu5WsR5zpV4qnVsi/SaxPfmhzFxjKSdQ3sevAaHdFikbTaMe0uUetZu/+udrACYK0bYYvE3vI2rpsNps106xrXjvG3lEUjZL9YzSURdzPG3a1macZZRI6zsrdtbG4Le8/IYnNOyW3TWi+73cd2l7nfTSwnsWtV3D4XnkM9InCp69dTrnex2SXve/mJXNk+k7YxrKpz3ctdAM8yvZP1rw3HG1rM7nW5apSsV+8rXtBi1cGjnW4p57thDbetuToRVUNH7CqmknjELmWg+EazPtsk9CkFZTFE06bRGtv4xjjOsY53nCv9xdR/UYnpAsMHLwvO1MgpJrIDSUrSCho5gwDb4IwrQjCYFTVfNiFhhlySzCsrzJU8I+VaPJzE+ppwn1Xp56SGKVXrpvb/Zdx06zmtGcdt0rW25AQPml+j5g+yOc9uxjOcy8nTOTvymnbubIDbOU1zCnfN4AT09RT8tyVSM3t0TqeE/0rpF3pSzo/2c6QHregDJ9qdlxYwJhHN6Gx2Woc+LHSoofJnUgd60W1utKxB/E1W59rVFJ5tnJ1q6FXXudXgfbUUYw1qXtus1so9b4QRPKk9j4fDC9mjV8TMNDL7Fk+p/iw6c2lm+YZX2IRuNqYPfexfJzvY/x32aIvd63
ZLGtjXjfaniT1rE0L7tGgVtL7l/Vp6P3vUA78zru/9S0c7G9K+Zrh68w3wdPP74Rv5J8ACCpkXEyXGqmlxczwe4oWeuKEP/60Vj1fO8pa7/OUw7zFHg8xSIO+PpkmeeZFV6mSeg+ukOzfgkVUK5YFJWcv+mvJFsizU33oZJ0c9mLYX0scuQhiQ5S7LuRMF5rNxO75aV7Yfr85VjNMa4RUvdS+pzWl4L3ie0KtnpQ289lNP2u1BhPsQ5e7pgC/c1nenOJ+63sKv9xfvfVe4qcWdaXJv2r6Cj7fi6854dmua7ZB/c8LVHk1V1/vydse35tN+68XbtvHNzDpUxW51aWPd7P5GO7o5f1jMm3Drbx936h9/e9Z3mOxm7bd8/z370lPe3RMf/eD1vkW+w9r1Zbe9uX2fbcLj0PDexna3gd9X2A9f9pKnvf93Q//uyOce9fRUPVxxP3e/m1616I+7+kuL+OfTvfOVNzbokZ18gZM+8MrHdcw3Ss63bNAXfLw3ffUnTwpVgA3RPQ5IMyS3EiBnGyKHKid1gd6hgXEScx74gSAYgiK4UQSVcyVIc0j2cy8VdAA0dEJngg3Igvzjgi34GRmidBABdizxdCLEdAOjg3DFgyjhg1InGxkyf7G1gGN3gN23bnNyhAkYdkrYevdXe55nM1AofVJofu03efh3ekuSheTXf3+3ecb3hfAXhieChL5VXQDof8u3HmLIfwk2hb/HhMHlfWUxh8hXh1yYeOKHXlc4KXwocX4YgPZUheOXfx1TiID/J3pwGH5naIWMiIVrGIWrZ4djxn156ISNeIlamIl/aH/ud3xp+ISgOIaHGImJWIpoyF5qqCVsqH3fxon0pYcs4Yhm+IZl+H+Q2Itx2CK66Ivlh4hdGIjTNogfNIzFx4vv94jFyIp5J4epSIcTNooGqIiCWImEWI19eI3GCIiTuIhgiIqyiInrR33bh4e36ImWeI6hmI6auI7amIzcmHEwCGOGN4H+lHIy1nEZiINFxIErMoIGeZAImZAhyGQTFFL/Y0E2J2T5+HE994IwhXMqqGIyWC80OIMvklMcFGX70nQZIoQhEXVABYR4YZIWgZJG1WARuG0xuYnsuGFu+IvP/xgUMBEBPNmTPKkCM8FxEQeNZJiTxFiUpogUh6IATNmUTXlVQmlvRLmKwCiJzpiUOjkAG+CUXLkBMhGV+/eNbYeNS1iPr3cWAxABXLmWTQWWjheP9EeWVOiKlIgVA8ABa8mVHBATbrl7cJmEcnmHZhl9YWECebmWEQATfZl+6BiX4UiKXliXTCECh8mVIrBTiyl/jQmYj5mNdEmOTFGZa2kCL5GZezeL7CeOV/mKQbGWIHABIICYpcltVSeYn7mNYpl50mgRW+mUFyAAF9CVikmb2GeLNoma6pgAXGkrXKlUptl8yDmPKsmZu1kRA4ACTsmcTckCfEmcM0mPt2mPuf/Ze/NIH5TJlNqpAJfZnd9ZmzQ5mAj4l23IFy+xAk2ZnonJnsRnlTiJlUdJlUaJEzCxARzAAbZSoF75ld4ZnYH5nuF5lmExE7YSF89JgAzamWX5oITZFRKKEBS6oJs5nyFKi9o3EyMgACPwoe1ZnDX5YfJJokszEyUwAiWgovvZipEJmlMJjtX5ECcHKxV6kkGaNvzIERVYKgIpgQFpUCqnkE76pFAapRJFZEC3glVqpViqkVcqLjZ4Il76pVqSIWIKptlBjWF6pmgKM2Oapl1apmaqpmxqhGTapnAap276jnVKp3oqp3b6pnt6p93Yp7GYp3/qp3xKqIAaqIhqqOD/saaLyqiQmqjy5aiHWqjCOKeVmqmSakKU2qiY6qmCOqiWuqlw1amRKqqaeqrm+KikWqqfeqmhuqqjKqup2qp4YaqwyqqoCqq6Squ26qqxiqez+onBKqy1aqvTqZLJ+lu4mqvDuqnL+qrOeqy+qqrE+qt7KK3VOq28+qyK6q2JGq3F+q3Ueq3gChXYyqnaaq7laqzd2q7kmq7i2qvs+q72aq3uiq3zeq7qOq7LuK71qq/M6q/oCrD5yq34Kqn7Cq//SrD9Sq8H26rjsqU6R7EVm6VUirEzJ6Uc27Ee+7HG4mM1ty0YWaT4mJEZq6UrmFIWuZHxUlPgkaQLoS8h+YMD/+uwk5p0JMSSVDZ1AeCe4MmwBYuzW4ihl2J9UMSiEBuvCTuWRhs1A/hKs9isuzqOuHmKn/eWqsi0CNu1/cmaFaZ7jPmiVLutXyuZsKh/WmuNDbu0bauh8amMZzeUu1i1Xmu20ViV52d5azueOUu0uWicLiq3sUe3/8m1dpu4eRugAii2mkm2Bou4i+ufwci3frm1byu0Dwu3TUi432e4zai4AXu3SAm2lau2l8u2Q+u2q8u5nei5ZXGjx9i0rau5CtigAeCzQLuwtputLVpmuAhXsquaohuxeFu6aItf8XeaI1q2o3u2Oqq8lju2mFu790q6AEq5jbu80Nm8kZu5yP8Iofc4t1JZt8drvM87uaa7vRQ5kxAoUBZ7giarERP7UgSZI/ebJCC7v/zbvx4rshBJsik4vwZRvyp7wETGsi3YQAw5UkV3MEdHkt9rvbRLVDI7EgjBZZ+ku0rLr3/LuuTJYjTEwd/Ju9dbvDxaKiOMtHzUwb0buIArjyL8QyQ8tRO8uTmaJNYWHDXsvTF8q4LLNDscEwnQw5D7w6aRfUM8F0ZcvTh8wuebwumxxIfSxKr7xBX8wa5LM1RMH1bst74Lwlqcw1fSxUXMwjJpw0i8rHZjxl9siJKbvnKcvXHixmhMdS4MxXMcx3SsPisctWFWwjcrxmG8xRtmx4DsdYL/7Lzo28hSvIF/7LjMe8SEDMPwuTGILMndS8keXMhknDaZzL0W6sOVDKyXPJCRLMpSS8qdbMmGvBahPL2P68RjrMeO7LQWmMqyPMm07Mm2zMePXMe6XHLuO6RJQcAFYcApu8wzl79l/KMM5b/SPM3ULIIAnEARWbLxG4PbrI8XOcAKPIMPDFQRbLOMDMzgux48u3QqBDBvvKPpjL3orJt6O42qHMhqXMpA/Lt9Rr5hCcfxjMIBrb7JG7b3rMj53Mqm/MrA644QV76HO9B7LNF9HL0Gvcub3Muu/MIL/ckbWo5Zm7pgvNG/TNHBXNCnG9LUe8W1nMW+bLXiibUHB7r8/xnFJn3T9My4e4u6Kz3SHV3SFLyaKM2+PD3LLP3S8ozTIfy01ZfIhbfIN9zSSZ3T2rvTKm3UPr3P+pzEQdzPhQvRoWvTQS3QVL2+Vj3TYF3TEz3WYr3UPRoyd/yzeezSJE3XMszUuRvXuzvICq3VDO3Vn5vWOArUWDzVbl3PreTU1wfVa8zXHh2341vACOEAQkLZYeEAE0nMyEwQynyxKpY0mI1toV3NpF3apm1jzCy/Gqvaq83Nre3Nmz0Qse0RB1Dbtn3buI3boELbud3bvW3Cdn2r5OLbxG3bGTHcxU3cx53cxT3ORlXOBwPchi1f65yD7Wyz1f2A72y+a13YX/+bAQKQATFd1faM0aPMyRztmAwxARNgK+w9ARrQuW99tIqdt
Iy91Wz8EBOAngjRlC4g34i9TJp83hr908FNnQFwn/3dlBPwuvMNtQO+yuhN2Le7EPvN3wLAlQ4e4Ax40E+d0OnNmQ6QnQvelBTQjni93RHN1t1d4SlA4hnOlfE9uA/e1BGOz6wc4oOhActZ4k1JAsc5jyoe1i2O1PhGAj0e4zJO4xwO1/Xdwvfd11xdOSfAlb8ZnFz5AkGOu0Ou1rfM4oeI5E35mrG55Eyu0+Vd1Lx81HU93UXL46KpACmA4jWe10+exjlO4UUbABQQ5xuO5ol94wid5wfOHKLZ4HT/3uT0LegfTuhuLorrXZkADugC7uGLDeJ6DukMQQGc3umcfgKTTt6Bbun2jemFDqPq2OWDferZl5qVbt4SXuB+reOoLuQzZ9n4getYEdqxLRCdzdqfrRahDdpsM9qnfezInuzyk9quzeyw3c3t2+sA8OvNjimf0QDYnu3avu3cPuusHtVG/oTIzdy5vdzk3ttJhVPK6tiZbuAtkt0M4ZL5ku7aBe5tTtbeTdDRC97ineYds18qau/uju9SDb1XKwEPYCsPsPAV0OEDVu+Nfc5KXeG1JQELsAC2cvELYAEOPycATxXfIvDe/u38rCEMoPEZr/ES4OT5BZMiP+X4fXiS/9QBKI8QGn/xLN9eEB/zEg/mJz1+FXDzKa/xD7DoOk9eLy/dBL/nAWYBQm/zN78AHgDhR+/yEZ/0Mu86HxD1Q6/xHUD1D4/0Vz/2p9yOTv/0AhD1F08R9C72PI/1rc4QFn/zGCAAGKD2X2/jVX9hpKX0bV3w+i6IZ3/xIYABIaD2U6/3YW/1b0/2f80eQa/2UQ8BRr/4fA/3mF/yuUHzkq/xOW/5ebXuPZ/vRa7pi3bynb8AK1/5Ho9bmd/4Zb/lct/5HP/5rb/zUu73pX/XePYAEPD7wA8BDW/7/+76jk/rcR+8ePHxUxHyx9/uIy/UFp3Sw2X8sJ/7WV/n4QPNsv/SApndjyjr2c4e7SdF7KKt7Oif/uqfLOOv2a9N/u/v/vFPgd/vT59x7r292yWB/+XO7iSvpgBh4MBAggUNHhQgAIDAgw0bAoAYUeJEiQkZBMCYUePGjQwOWOQYMiSDhBRNnoRYEuXKiCpZrnT58mTCAxdF3sRIUkBNnDgZyKSYsGdPAkJvZhCQ4WZMoBOZNm2pEGpQqVMrCiAwFGfCrFpFWgVg1CvHogJCSoAAIaGAtB84Pp0KF6rcpnSBch0bEm/ejWDF8sVYlmOFBYXXFl5gM6NdmYxfOmYJGSZWwBr3Vg7g1yzmAII3wkB8uHCHjZJRmp5ZFWxY1Zq7Yr5cWTP/585/A0hAvEB04QqWW1tFbTI41dVRX1eODXg2Z885c+tOmNuC7+Ipf8e9Pjd7Xcqck/Ndjrl5AAjPdxeWsHj73fWN2z9+H7k77PmygW8WL9bDc+gC+KuvjrUAh3MqvsmOA+y7vMKrrLnyzIvuufQCIPCqAQ08DcPUEORLwbEYBKy5/Z7DQAAMnosBwOoqjOrCCznMy0OvQORrvAeeCwGDEJ5zCyMWrXNxRQ2Fqy/BIsG7j7bxAuCPv+lULO5HAYV8kTYZtaIxryVxaxKx3qBcTUoxh6QKxrGuHCrLsZYMwIIuFyCNOirnjJLMAs30Cs2e1PSKTcK6VMxHOy2kM8xB/42z8sgFk2TONowkeCBSSR/oUc46g7xUSDy10hMnPrViM8ZDgSxUsyq9U/RDRvPDD7lRp8w0VkM3HarTmz4dKtQzXx0T01kTpXXPqQxIyIFGBTC2MgcSMqA4YgVodrVnowVrWmdtDWnZVLXSdi1vvwU3XHHHJbdcc89FN11112W3XXffhTdeeeelt1577x3OWmmZ3Rfaa/3tl1qr9K2W34Cd1cwhhQtqlqaFH+611PtcZOhhh0qq2OKDNAs0L49AqkwnX02VGDueQqap47F+WrVBR0UdeeKStQt2q22xbDnEl3eNGbueaQaWNlyJ2jlPXo/+mbual7o5zZxrLJpTpP9n5u5U+pb+6mkto651allJrhNrvZoWFjslue4p4q9lDjtozobGSVejk2aPbvfEfotsT7VeE22b7YYPcPnwLk3vW/nu02+mBZ+M8QwJ9w3yjOC+SW6pHU8NcyIlF5TzzBAHVXGR1DZU8zLdxoxykSzv2vQCXb/Kc2xDUj0k1tP2unSq2ZPd8KzNPjZR2Fvc/W7U7QOeVeGLD5z5wY9XDvRcRR97eFLXxq53z2sni/q8rYdV97ZR3V56ols1EnzSXYMeyeRdRr/D3MEW/1fyhR622GOTBUxbgfMHWMECODCDCZBz2jogvhS4QAY20IEPhGAEJThBCpKLgAME4P+aQrD/C2pwgwXs4L88CBQODithGmtIw3aCwoasj231M1XGWDgQjM3QIBzDzMcEoDKviMx5jfthhk4GGJ0MkS8se5/O4gezIGauiZtr36KSCLUl8uyJxLninaKoqilurYpzyyKhsAe0+73NfHHzXuHUN7/7aA9/XezbFy8XRuKNUWlbnNEZK5dGS8HwhfRjXxlTp8fV8RFMgPxjIrOHR5zBMXFybB0dr+fHRQoSedo5GyRxt8Y1utGMjgydJv8myfAhspJXeyMmg4cqTpJydnkrHyinJ8rFuZKNp3RVLFWpPFbaspOMdJosz7c8O9bNlp4cpDDRSMvR3VI7v7Rk9JS5R2ZWPs+XxwRm2XYJP2JS8pnYjKb7tqnEbprym8U0XjilOE4qllOR5/TmHdXJxQ8ia3+Y8Z8I9XmwfiXQdxxxQEAAACH5BAAKAAAALH8AGwDeAKUBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ58jZ2wB6ev0AAP6LAEpAidHl5UM2fv/HhEZbW7AZOo8kVKdlQZBcU//ZrClqaiwAACwYAK8mJmUyc4U8PMt1Jv+TETAoXFtFfKoAAKtdAM0YGP/q0QAWFnksZGpJSWdKcrNrOMMTLP+bIwBVVSggFXxTYppgS6YdQ6qqqv+nPf+rRv+9bv/Ff7vS0tQOH9QOINV6H//hvQAAAAAAAAAAAAAAAAAAAAAAAAj/AAEIHEiwoEGBAgQwCMCwocOHDxkcSLgQokWIDBIe3Lgx4YGKF0MGyCjgo0iRJDmqJJiwwYCXMGPKnJnAI8iTGFfqTIhTJAGePS9q1KkyIYGgQgUcRQpxKNGOAhDMnEq1plKmEJ9yBIq14U8BXR061VrQaNiGZs8GGEt2YEKpVOPCtLr0bFuDXMN+VbtWwN2yV9WmPcv27lu5iOny/csSrNq9gv0yRhiYcOWwhdseRhxXsdrJlPlCtgwawGDMl7tmJruZ81TPdkHn7ToadenTqlNjXa21tWuZsMPedny2du7hdVEnPy476u/XuLEOFz17t+TJ0ZlmR8r7qe/nL4N3/53+uLr264y3B1XfszvR7+DFS29Onbjt5sut5z8/HC74uezhRF5x5nGH3l8BnpSgSO7t5Nx/AOrG1IB6FbjegYZJaOB+BvYHYYQcBkUhbRa2h6FmGl4YookefjiAfBPSV559zGGXookr4tTgSvA9ByNSI2JlnHXI8bVg
SDsW9eCHP4ooI4E0EomfkTfqeCJrS0LYZE9BMjUkf1NGlqOCV/aW5X9bCvhkhVGCaeOYDFZJZotMHnlRl0h92WGYlsGJZJnenRmfnRbhGZSeFxYpJpV0akkoRAZMZkCJJzGAAKVkRsrYpFHdZB0Cnp6n6V+cIpDAqaimquqqVoEaWUKwxv8q66y01mrrrbjmquuuvPbq66/ABivssMQWa+yxyCarbK2j3lUqAdBGK+200x7wLLXYUnupAM22xakBB2QrbrTbgjvuuOVKmpC552ZbbrjtYovAcKEGJRFFZ6XUnElhkcRvV/ra+C9W/tbbEwOG9oQoi3wqx2jDufl555oktrnnm1RKbFGSW8kZp8ZZUSwkpnEq2ufDGC/6mchekvynyQ5HBnPEi7Gcp8tJzawfyul5/CfIDyWM08JW6nwe0GIB+p7PSSHdkNAnET0nxDvLTPXRNdtYH88IMr2x100p7aDTDD0atM2H4vy10RtynSHZfcENtU9qh822im6jCLfZDs3/HZLUJV/dttUpn7yy1jPmjeXeYD/EMVSMy422wnU7LjaPjSfN+N04Zt3z1oT3HLniZo5++OeJh9616aQJjvfpXYPeeuExzy56xp7Hnrrtq+Ou+tu+x4Y4lKQHyvp9tNMMe4ayI3+7yrwDD71wkw9dedKcW7m5652r1S1ZnKpl6fVocatup5G5atn3WpVqsInqo7bs/PTXb//9+Oev//78998r+0+5VryqJcABkmtd52OXAQ/ILXgtEFrp2tS6GoCAClrwghjM4Loc+MB5Ned9lZqIQvL1uIPYhIQlASFKSogXAbjERVYZGFMQVr2oka9sl1PS8WrUM/84KnNPqyHd/yyWKO5pr3hL8yGa+BbE4bEJiWMLnvO6psRBAZEhfrsI4F5mRDJtz0ZV9BETsSjEv92wL9nzIhQxF8bfpOkkWbTIFnPWxY+tUUltdM0bRRJHiMxxbXX82R07lkfO7DEkfXzIH+0WyKYNEiqFTMwYA5BIhyzScmm04+9QFEm5HHJiTqzYI014xbiNsoWd7MwkK+mVM7IQMDuUEhhd9KJVllGLrsxhx2Lpph7S8pOFuqUcc5lJQW4SS6msii1DOTIiMix5VYseJ3+5TNQR75ilkyIPqUjNUrKSIZfEXiO/9kVfwrCaurumNBenTVmas07eFKYfiTnOsJWTm+eMJzNb5v/MotXTcvfMUDKho09rPhGbxmtnL/EJT7gBkCjhy9e2qPRQnbgvfSqMU0VXUipWeZRVb8non/xH0pKa9KQoTalKV5qsjaqkVBmMaUwbAFOZ2tSCCJRgAx/IQAV2MKek2iBPISjUoXrQRgmgZQMSklR4DkeGSCkYCp8qUotItV/DaeqHtqXVJeoScgq92PMMJ7yedfU/XM1nMR2J0KXxEkjNOSt40urUf2rulLAMq5OQSku6/nCt5MRrY966V7P2lalqtWv5Aqo3vXIprocVgFzd+MrBOtaf0MTa8lA02d/41auAtadg3VJKdGaos675rBVDC9DRUoawj+Wri1Qrxq//khK2mB1r7agn260itq6ZHdw6szm98UB2tr/9q2JxyFh2Fnc+vYUQbSlr2xbidmrBfV1Zu4ZazkxXj5Ul7XUDl93ubfe0ke2uJ8P72stiV7fKOy9n05vY8h6xrVF8boyii9bkgna5psQv5saLyOP6VrL1hW80pyg9svLWsMhFMHAVrNnhJlS/cOXvXP27WgBPkr2mKW1BuUvfCfcOw0W0rxq9d76PuhhVXH2xi4HqrJBiNDIu5chFLRO/3LD0x0AOspCHTOQizyrHGyngAq31lqESIIJB3alRi8pTKNeYWxS86U2p3EF65UuEVcUIiE/YLzIDbMwu7CZU7fXNAISz/3ysvauA8dhNye3zZv1874kdvE2B1jl3zNsdgxuL4me+84d2Nqgo57xL95L30F5NdDoPamG3OpqLs8ynpAOtzkE7l8/uZCiiAY2i5vWZ0KBeqJ81TWosmTrUDd7tqZH55826WtCzJm6qxSrqSLfaTK9WNaplDetpstrWwMZ1sT9NbGHT+tjyvXWnc33hXacY0lbcdKmV7WxdN5vXq27orwMVbHAPO76eNtNAaWLabU972d5GN7WTWOtoJ/vd3a72t6/d62yPe2nl5nes5Q3vQK0bOO2WNqXTrW+C55ve0H7wpBdd6fxa29D9rq22FU5xhlu60LnNOHUdej7xTRTH5//r8afCHDYkH6SjMv6ojS1j5Jrb/OY4z7nOmZVyJzM5Kk62sre4vMB3BZ3GQ5dylYluwKP2jOUOudcIy1xdwKz5YGYmGJqvjpOrAqzNb8ZhnBfr2hATGJSKbmbZPyzisTP33w4KOMYHvuB5W3zfsU07P9cu4ubGu+7Q1Tue+X52QKpYk/YmN7fNzWyHM/7vFZY4pxdu9wFfmo6HN2biAb54gZ8b8A+/u+MLO3G1MxqsINcz3SNv3DunLc+P3jPeQ7564Ure3ZQv+McvTvvPsz7wpd/76W97ecNT2PatF/zrCV98RmaerZuPe+fn7nvk6170oM+w8ikHe0wfX7sex/7/7/e7fet1H/PfN2/4LZ96PspTkfR8fmCHb93mn8313Gd++70v+9HnPfiDR395tX/o13/ZR3qT13GVp0P2J07yJ1osplMml0suZxA7hhoqJyopB3VJk4EGsnMgGIIiOIIkOD8VWBDf0nNTZj465VNFx3QDJHTgA4PxYnRGNRxeRnVUhUJcVylbx4EM4XUEg4Pv54Dpd18Vx368B0fN0WZoVnjOd4QrFn2Y44RVZ1kEaHwGOH7a1zVW6HYBloQMmIUhYyNf6GFth4YbZyZn+ICtJYBYuITuZ4ZFCGdqqH9yWGB0iH+913gHSH1+yIUIiCJtKIWIt35jmIdo54V1KHZ3/wiH4tWATdQzhbiF1hd6Sjh7TLiH5ad61Qd+C9hoZHh/lNiIaPSIYiiKihhMnAiAnheIl/h4DfeHatKKCXh9meh/fQh5sTiIWFKJtQeKuJiImjiHpciHngiLwoiJxKiLm3iMnRh7wah+oYh6q1iG0OiKgMiLyyiLu1eMepiNt8iMqgiOBTiNSBgWJ0gQKSiBKKdTHngh8Wgi6zgQF5gb82glJbiP/NiP/liC9SgQSmZAP4cAR8eCUeaCTUeD7SKD7cOQ52KDVZaDAANmU7UvQDgSWTdDP8iDGUlDyDhEeGiOWoiOU3h7HGd6qWiNJBmFltiNvnhvuUeOLOmM0viJ1P8IfOMYgCtJfKNohC+Zk+SnjeY3kjbJfyZ5iMlHlDZ0fiWJk+lYjT55jaTIlCIJie31k3bohnJGhUoid7s4i4K4jWLZi/+3k8uHlWYniY7IlWSHbIqHb94ofmYZlt94lIuIlvmnlmznd2UJk2eJewo4jOWIl0+pjELZhXpZlHzZd0ZJi89olWbklC6ZlJqHiIUJmcYombhEmZiEiphZk5oZjpw5TJ4JlJYJfaE5lS1ZlYvZlI85lnZJl4BZi9E4mbFZl8nIjYkZk3E5k3OZi6N5jlB5kkv5mlfZk/WnlW1piJfZFQE
JABHVLye3Pht4Y9YJjxnZF/lIJv/4neAZnuL/iVJadlM0hXQziJBJF53tmJAJ9J4tKCnqJRdLdYWk1YT4aYuEmJ/i2BbzGRfbQoT9+Yv8yYj6SaCTIVcWAAEM2qAQYAEzEaAFup8HyoYTSqEDqhVnlQAK0KEe6qFnJaEVujQCmqGBUqIGyhhntQEf2qIbIBMiaqIOgqIZQqMYmqJ/0VUW0KI8CqEwEaM4eqM1eqEIKqMzChpdlQE82qIZEBNAOqRGWoVEaqEjSqJIChMQsKQ8CgE/imZTeqJfaqVVeqQJGh4ioKUtKgJN9aRCWqRB2qZuCqVRqhJNZQJouqUvwaZxuqd8SqVz2jE2ShZNtaMtCgIXAAI8uqZeOqZS/8qoShKofpqjMMGiH3oBAnABLtql9kkZYUqmfwoVkAqmZRoeLQorLapVehqpcKqqqyqqn0pKpdFVduqhpuqhXKqpoeqpb9qnrrqrrCqoMZEAZ9qhtaoAIgCji/qqLZSruiqnvvoUZ0WoClCsPoqrnfqo14qtjgqo2coRcrUBGZABsBKuJhChyfqsYqqsgMGsjaqicQErAHquzjqvrZqu6Nqs/vmuCRGvmxpi3Qqq/wqrAbusV0oVJCAAJMCv7Mqt2wqwDSuwD2sQqLUCJLACCjuwjbGwDKuuGYuxBBFzH+WQAYSeI6ue6wmf7hmfKhtl49myLvuyMDss7NlkQ1WQB/85syabnji7sxz0gkrXZR/0Zfiigx57n0VLGT24QimUL2AXfxxrtBE7gFTZN6YYdqcYtR2LtZHInJRUtU57r+0Kts04nKx4m53JdxrrsM6pmscpmCq5mgSrtZwKmm2bksKnnOt6tP5KtzrptncLt3krt3vrlm8Hl5wnl6/4q4rbq6k5f17ZMWC5m4xbr/iKmFHZt3bLk4CbtU87t4Qbhigpk4NJkxDbuYO7to4bur85usGprab7hGzZtSGJm42ZtqUblJc7lMhJu3jLuWK7sbhrnJgrum8rlXH7uv3al3CHOZF7k7xqr/RqucKru36rucYbuMgLhglHvH97vb4bvYv/e5dki42lOU+nuZXZq7fKa7jSh7hkCb2UG7aNC4FngbMTeL44tLMoe2U4251x4r8jFbMCPMAEHLP963MDGYMkW7L6u7L827MLiWXlaVMUSTAWSbSCK68ZkrQhQRIvxCRNi79Xm74ZLGIHF6whjLbqm7wm/EspXLsrrL2C4iMv3LtQS8KoGzYnPBc1vLk3/LtqG7z/68JeK8IaHL+uK8R/ssPh0cPe+8PgO7nSO8Qw5MSEGcRRDL9TvMREPLtnC8MlLMNM/CJWTLrHC8S3O7+WM8YJUMatC7xZXLm8aSVs7MaJK8XhK8d/CT9dbLamqcJhjIZ1XMSAjMNKnBSD7MV//wzGhqzGHdjH5Qt/RszCtnvGjlw+iezH5lvIaGzJxUnFIEzIjNzJ2HvIX5PJkWxJXxvH8ovEmXkeqLy7X2zDntvIn8zFMGS/EkWB+3uyDpx0AIzIIOtRBVzMxnzM/HjANZvANbjAEOXMFgXNHAWR6ELN4iKRQGsjH3nBZ0bJK8zBFyGEHLm0WCXKtHy6pByHEOEBAuABSjm+rlm9aXnORxyIEiABsHLPEqAB9NuaVKvImzzKrDy2JUCsCeGhNtDPhkm+srzI9OzN0ESrB+2hEvCGUzuJqdxKk1zJpTxwEiDRAtCiFu3PGN3QAf3QHK3ODvChxdqhL9CVJE1GAC3JnP880OUYASw90R/qAG950TKtyTQt0K5sjRpQqjrtoSnQ0zEtu0Ctyhsdw3ySAkYd0jzKz6C70PGcufPsw7WczlsbAAVdqZfKozeg1Fj9z02t0TU91Kwp1R5qqIjaolZ91fCM1hkNTqvM1p7MbEV9pwoQATB91iUtz3uJ0iu8HDh9pzxt1nU92Fpd2FyNzjYtmnda0YHd2D99126W18+rx+L70Vqq0JjN1JpttfWcx8IZAC8QAazd2hFQAqItm5Fp0kFt2CUMhSNdt9xrvVecxnottUu9vatbvL2916g9trL9zkyBs4vNJs2NFQ4gzS8l3TpG3Uk2RtF9RdlNNtmNzN7//d3gPWQNnLL828s6a94PmbPpzZ5PdQDu/d7wHd/xHSkeId/2fd+njcf6TaLgct/+/d4a0d//7d8VPEPcrHUQbctYAs5WtZFRZcfv69kS3spbzLYOwc7uTJq07dRr3dkUPsfTSwEUACsiTgEfwNCEzZi2reAg/s4UsAALACswvgAcgOKPreKRnd9a3OLPyQAzLuMz3gJZvdtb/cRdPdm+XeGpGwAo8OMJMeMwPuTD3b3F3dG//b23rJonAOVAPuMuYNcbrtZC7eFJfMmFyxAcwOVPDuULcOKZHeZ4/dSBfNvB0wJs3uUzjgKOTeSQbeSSfeVQnOWOm+ZqLgBsDuMV/wHhs/nh+z3hyC1cLw7lGCAAGHDoNf7mKQ6bY37ccCzo/UzoMB4CGBACh+7mpA3nmy3nLN7oqW17W37obD4BYJ7pyZnjCe7VWclnTQ7rM34Tii65O87qZe7puf0BvA7jFCDlhwucdxzszu7or2x9LsDrOGDjfI7jfq7j0C6a4zcB3v7t3n4C1j7lvG3GVk7mna7ksa2YtM67tp7Sga7uuT285F7kVY7l6I7FxH7Zy30+z91M/44U2T3e5f3L6cnd2B3eCr/wDO8/BO/LBU/eET/xEE/x4DMcDZDxGr/xHN/xR57vSc7p+v6LAj7g9h3gJk/gBgYh9QnVc94zDC5m5P/8dSvfX4ACAwUAA+ku8iEv7NFem7M9XxF2EBXwAA/AAgLAAkafA8bt8yPP88vp06cu9AdmECPg5Ia+AB3Q9M/O6F3/6EC/meg19AURA1g/4zNw7lCv9k7PmoKN6Vjyn1QBpBVQ6FA+Avi+9nnf9lG/1FkVWQWhAnY/4zKw91+/83wP3G8/9XEP+APxAHe+5jNeAfGu95Wf+Oq8+H9P9gLRAZGf9TOuApd/+E+P+V/t9zW/YddBA4eO5zBO+X8O8ly/7W4/2ptf9QPh+ZJO6Wwu+h8v+2xP+rWf3BpO9dI1FqwP5aJO6lAO+7Fv+b8P/WvJtbd//Aei+8de+KNP+7P/7/W02ZuB2ficLxB1f+wLgPfb7/3qP+zyzu/sLv64TxBmz+tpb/jcH/z3n/m2n/rP8aRXD+sA0QHAQIICBBBEmFBhQoMLHTps+FBiwYMTJxokEEDjRo4dPQbA+FHkR4sXBSQYkFLlSpYtEURMWOHBTJoPKiiEWXJhTp0MK/bE+RMowpAjjWosenTk0KAoWz59+lIoU55Up1pl6jMrUQEZlY5M+tXjVopOoZ5NKZUsgKpD2wJ92zOuzrBiOda1q3Et25No/aolO7ekYIuETe7FmzexXQNkDRhMEFnyZMqVE9Qw2DjrYwGaN2d2DHorZ89MOXvNu9HB4q+rDb6GHVv2/2zatW3fxp1b927evX3/Bh5c+HDixY0fR568dumhpEN3Hi36M/To1KczB+q8Ovae3OEKOBBe/Hjy5RsbLJ9e/QHDEts/fA/xqtuDBtbfD9
zPQ+BBh4tMZqwLd1rdd7DHR0qLc8CthO9NVeuicks8dyf2d7Dih1Erd6M6d6dWdzkgeCtTeBBYh1DVh6D1hlEWhjB+h6MPhQNThlIiiDJOhyKVh4MbiAPwh0h0d2PUiELKWDR9h2SaiBvWaEwwd6TDiFI3eDFNiCK+hzFShUjYeBTxhSM2hnF6geH8iFWjh1kKeCakh7T1eFJoiELjiCvudqTbh7cBiEmreFeBeCc+iDS2iBHWiDGSiGRfh7d+iBKLiGrMeHCP8YeW3Igm8oiDtYaFj4gmxYgnqodkMIhWMYXF/oPYUogA+Sh4y4h5GIhgkYhqSIHZ1oiHV4a4lIiKx4f99XfDVzfNR3Vswnbcp3N94XQ8jRi6gSjNx3jMiYjMq4jNhnjPDnV1SUWM9YRuyXEtUoEuQ3L9eoE9u4MdNYVTYif3M0Wb+IFABIcLZ4jgynFeVYFAH3QfARim9ziXToh46Ig7KXjk61gZkWh074h0qYPfFYhvM4ivpIj4AokA5HkCYVe42Yin0Ijws5iDwIkJ54iHZohYo4kBRpiQa5ajQ4i5fHkZUIhhYJi/YIkY+YjyQpkh5ZiyBJhh25Hygljw1pkyT/N5NmqI84iZABmYaaeIpq15IaSYsnOWlHaYvwQZT+eIUfyY89RpBLOZElKYowCZVLJpXhSJUuaZL3mIWrmJQ2xZSb+I9fiYkPGZRuiHVkKZRmqZL4mJZA6JZM1ZZrmYlzeZdyKYVNuZFcWZQVeZb1CJR5KYls+Zd9aZSCmZCEyZdlWZeI+ZgSuJg/GZaUeZGLdVbO2C3JN4zteDjOSIy9Ipp/4YzMeJqomZqquZoNE5rtJn7ppkInkW6EdVjqV36DNX7dCDK3OS/i+DP095lY0X/CGQCZNY43U5zHyVlKGZmtglMpqZaGiZeOSZeK+IpIeZko2ZiICJiph50H+ZRR/9idiXl54BmToMiQOVmVGLWIeomK0qmKkBifETmJXqiePomZ0VmY8pmP54mVyNOT4smApviecqid2QmXYJmDlNiVVimWAFpTRnefOlmQVzmeGVme31mg07mX5CmZiqmgaHmC/4mhsuidXimig2mfMwKd3JmhIBqYKsqYLFok+2iiEOigFgqhQdWiN/qJ1SWgF0qgDYqiD4qg4TmkBFWkGpqi9LmSHgqj1sl3HNqfUXqiTXqkM1qZDEqh7HmT+DmgS+qlOgqmFWqm7LmZxudCnelsxTkbofmm08dD1ceadnqneJqnp+mMfKqZsvmnfhqo0Nin8beV/GGoyjGVh4qo2P+hqO/jqAgCqZHKqJO6qJb6qJTKI5JaIpuqqZnqqZTUqesjqqP6qUNCqr8ylqaaqqvKqpfaqK0qIKr6qpwaqxmCqrJqq48yq4lKq7Xqq6Daq8B6qriEq7uqq5NhrMc6rKUaqsg6NcqarM8KrdNqY7wKq8KardiqrZXKrb+Kqcyaq+Eqrt4arNt6rn0UrdQ6rrdarTGirusKruXarPNKr+j6rfdqrunqrmADr9bKr/0KsKtyrd1asAaLrwerrwi7sBLJrtLqsA9br64qseSqOv5aaBeLsQL7NhkbsM4KsfGar8QKsiGbsCMrryI7sSmrsiZrry3LsgpJsu+6sSjVsQP/u7E2y7EyO7M7q7E967EU2xOAOqiCSo3fuH5Hi5uESrRFK296+rRQG7VS+1auGUWwKY1La7RZK2+zibW1CW+9OWz7Nn80w5w5W7PJaX/+12+tCCJVWp8uy7Ani5FYGqMbyqR2C7QrW7GxmKNG2p4lGrTLKrhv+aRxSaJvC6Xt+rM3i6Mh+bd2KTY/ureLS7hOqaSBSKZ/i7aMu6NIqnKJe7iDS7mj27ePm6WAG7oLCrMK27qWuaX6yXOqO6KlK7dx+6J1O6XmObsrertz+7JX6reoG7lxM7nAW7mkW7j8Cbf+ybs06rvQy7qva7irK4TOy6XSG718u5/VaaBUiLe6/+uzlluy3Puh4UuT7tmhyHu8tYu7wpu3Oxm4yTtyZ7ueOkq8rIOT9YueQaqVA3i9sfu7tpu91Gm+3ouH4HvA5Ou62gufy6u4nAjA2ynADEzAwXu68HuLZ3VVbNq0SLu1SEuadyPCj2GaU3vCKJzCKswwVWtu4Pd+IKy0Q6u1X4tvYQtDNyy0Y1szwWl/+0uz7OjDOOucVWO8A7y9FXzBEByxY+qj+gvE/Eu7TJy5TqyeP+y4S0y/E1rFZ6qP9euQ69uFXPylXgzEYNy+NSoYuvS8FhzGSezAGdy2JSK/7DvFRwzH59u4RKq5qMu54xuim9uc/5vA6ovGDWzIBazA///aozb6xJ2boIEMuoRspW5MwZY8n30syKU4ycyLxJfcxgdapprsihLsx/O7wNObyZLMx3Fcxo9cdJ2Lv1axxtjryYdsx+7byZXcpWMsylcMpFKsxYysxi76ybYMysrbyqvcy5EMxRHqynvMzKrszFgsuricxpFSzLcszH8so838Mhu8pkMVw9qYtOUcp2bFtNC4wuzczu78zjnsQxljtS9sm+ZMMRpBb/X2RfH8RP3cEexGm7p5zzD0m0LTw8jJHwnHa91caGsLcOqof6PstpycxVMzdOYBzXT7vnmcuhVtzcF3FhHgACRd0g4AASyi0aYrk+wpy9tjxLg3FhCQZxP/0FqM8cViSsWNrJ6clAATkGcH4ACoYcornZ4V6tLxA9NVFwFATVyjddNmnNNivNNdTCsJ0NTE9QBkAdWvnJ8TzMtUTca08gBYTVwobUgqXb5Sqsi7+9HVK3RkMdNlfQAU8CRpncssfb9ELLk46SZkrV9ZtmX6hdJc3dBO+sAgfZ2lrI9uItfadWT6VdNojdOYO9XErNQwRxYUgGICoF8RYNeUzaPRHNaibNWcrV11DdpRXdmy69bBfNFm4QA61tnJddaTvdqi3cSkHcg9/dPYtWNavdVErda5y9Z3y8odXSoDwNS/TdsHYNPDjddG3dJ7Xbx9nRgO8AAPUC/a/dlD/33Xqdy9hfy9yG3cYFNP9YJyof25W7zbmVxPFSAAFaDeuM3ew5zNmE1xpnEBFXAB9N3VUq1v0KhoQkXgjtEA/wxWArAB0Sd9BH1DDy7P7ITgE57gdAXPGJ7hGn6n5IzPEQ7QFo5tIU5uI75uJf58Hw7iKa7gwThZCPDiMB7jMi7jAjEVM37jOB7dxrzLqQoSOP7jMA4VPg7kP+4WDHDkSJ7kSr7kBi0zCG221GzY+KecEf1/ArDQi4LUR0HLAbzNqOzlgIzYIwxMWh5C+X3MaM7jSjzd44IWJm3SvJUAZV4UXP7VyMzNpxzm4i1tZyHbaZbaWz3nf3Tmal7oiLzm/f/rbBzS1N79I4JuSYR+6IZ+zZi856jCSX9N00/96LejzXf+5Z+u5wa8JrTi2EAt1I5e3flrxVG+0RhMbWWx2WUdWXKu6rPs6Wku6bqezKPO52PB3GUd3LU+yOU93nhex8dO3BytI27i28kV2GnWWMO+ycVOybue7G8cytT7GG7i58kF2fqV2tNOyq7du6HOs1LuubA7G27i07OtYrTO6XWu42C
O7q6e11xVFqaeYUEd6Lb+0o6c7pAs5txuFrJ+ABkm2alO7NKszK1e1IleHT2NX859AI2OR/KO65OO7Tuu7QSPNqnk5wkG6AtP7Q2f3OBd7y+ZlKmUANrN3dpt2yX/T+7VrsvXDuq5zutrTeqmkd6LkfGRTunnLr73zub5rhjxPd8//+9JHfB5bu8QX1/DuN/97VPOaOA2ivV+QeEdjsMrfuHO6DsULvYKweANTlYbnvZqv/ba1/UQ7nwfMcNcK/cfTPcyXKiv/PB3fPPWOuREfuNC/vdFPtF7L/Qbj/OHv8jK8dBtEcQJjbLIrvhPT/SRL+o7b+0gZ7P0PvR6G/UcSN15H/qiD8yfr9ejX/lQX/gcX+m9bvOZL7Cbn/Orn8jFbex+ybixn/ipL90Rb/oCf3+5z/e7H96tb9Fhp/kpz/l6XPS9D7l63/GGr/uUr+yvbt47ify/PPmHbenG/z+SsJ/8so/4Hs/9iX3835/9qL/y9g35qi/56d/57W/5tY/5kIn74C/98M/7Un/Uz6/y+Q/9AAFA4ECBAgQQRJhQIQCDBAI8hBhR4sQADSlepGhw4UaOHRdq9MgRZEiFI0kmNHmSYEqVBQ+2RGgR48yHMmliZAlT58CcKnuS/An05U6GQ3cGDWnzZkYBDpdeREpUqNSiUqN2vCrSqM6sG5U+jfgVbM2tVGF2/VjWp9q1VNGWZHtS7Ni5YN8mNEDUgMG8O/cK6Avzb2DBfPUa9otY51+nYyE2qLsUMuDDBi1fxpxZ82bOnT1/Bh1a9GjSpU2fRp1a9WrWrV2/hh0bNv/hloMr0z5pOzHl3bhzKy7MezHw2sRV+l6LQPly5s2d5zXoXPp0BHdjxp1qFXvSgwaof1eu0Tt46tbN8hSwwDHEBdXTr3+4wDx6t9s9zneJQD18+QL0ww+gv/OIMgjAhwgo0ED8qtKuvvoagy+ypRasz8AAEBTAwgU3tA+rDrWCcD0Jb6JQOwsx1PBDr1RMy0HtQnRsRJpKJDBDA1FUkEW4XKzxwRRhpEvHAdE7MUEAOeTxKCGvA9KuplIckisbAcTxyCVXupK+Bl/8EcooWzISviojzNKlJKX0Mccmn6JRySKnJPPMs8pksMY1J3wyxy/BhHO9MUWkE8kt7exSzz3/5erTsT9jDLRROfm8k8Q8rTwU0Te97HFQJdO0MtIZ6azwxjABfbQtTaX0dCYZVQXVRFETDbJURGUVKlWcJo2zUu4uNTRTXzflUk1MddWKV0pPnZNW7myFCldSifXQ2Fx/RRNZSAs9FtoVpX2W2mStXYtZpsSdqE0puWVU2fscDbbTYbVFCVawFo0V3FntrRXbaf2qrIFX/YVvMuRC0m044YIb2CPGDJyM3IgErkw2iSemuGKLL8Y4Y4035rhjzxLuqGCEe7utZJJPNhhkjkSurT7yqIPOv5enE9RbPh0cb+bmxNPZOXPn3G+99gwK2jEB8eVOXQ//4y+6osc6Gl64/9Ct12ZTrUbU4bCcTVfqqV9991ussyNU2F69xpJqJ5XWiu0VtYZo1VvRjldtNtkdO+l2yYSbLLqvsxtPt1tEet2+K+K66r+LClzSwXcs3MPD5W52cSLBPrvavNfllO+w0R5V0dAV1xzYzSXXt9vFR5+X9bsfjxd2JlPv2nLXl6J37cjb3v1t2kmn+/abcn+9d8JPB/F33W2X9yniBTcecuR9Nzvb1ZvHXfhPZceSe/QmT3z56xvfPvrYzZ+9+n3/1n6m5x1Hv/v4v1e+eObJZ9V7M+d3CXzwLXcJ/ubGvzqZTkn++5zX2oeR95VvesczIKrqB737Yc56pcOg2A44Qf/46aVf/wIQxFA2shEWJ3ySodxERGgwj7XQhS+EYQxlOEMa1lAzKtsIy45jHJLo8DcHMyEOF+LDHvKwiEDcIRJz47KeMSdmTdwZ3iKoQTTlDIo8g2J4WlWjp4FlaO9pmhQzeDPtME1oTgNQ1MZnwfVR0Y1kLJu7Mhc87A1vgZUjYM02qD7Vsa+ONGlg/vIoxjkhcI6g+6P77sgU/RVwjOHioAPXSKVFlouQcJzitfhYu0mKqZIS0eMj7xVHzx1SgYlk4Ce31shQFjKSgqwgJVGJxwdKL5OQ3CTwEClAWt5ylL7MVy7F50deMnKQrDwhnv4HQFVCJJADrOX5opk+OV7/kI7FtCQykWlIa+6SjX1849WAuaxXQrOTfmqm3445SG620ZuyTOAvRUm2PVbTnafEJigvKc55klOY9jun6GZpzGnKr6D0+ycFAzqWZ/ayn5xjZzkdes1vcvKhHtqmRAnKr90AzJMC8KhjVkjClJnMYH1rGMOMSDAbttSlL4VpTGU6U4oJUSFEZKkSc2pTvKxUYT4NGVBXJtQcEnWIRr0pA5S6VKY21an7O6gjwynPqdLzorwjSgIGsFWudtWrX2VAK/lZVb1FVaqYvOpGtPpVtrJVAWKlKlrJClGzwvUka+2qAx6wV74+wAFffes+rTpXjK6zrnTC61YhcADGNrax/xDwamD1Z9fBytWyY73sXb06Acd2lgKRpSxdxynatEKwtLbUSWIj0FnWRqCrkuVfaElLWKyOtrBZ5WoCWMvaCeAVtlGV7W1tW9vTSnMneAXBbln7163+drimpe2KtGnY425Vt8pl7VqdW1yDPhe6ma0sZsWr2QE4gLUWkIAFltvc4EqXutztLny1hNsBJICzjpWAACTQ2d6yV7Bl9S5qo/vd8YaXvANYrWMt01nX+ney/wWwfKEaYOOmtqsUULBBHPtZrm53wBX+MIjBG+EQX4cqiV1sYxf82Ne2l8BxLTCJRzzbGXcksQN4gIo1zNgHgBbCwpXwWWNM4yEDGSY3vn3uAVZ8gBt7uMbuPex0oyyVGw8AAhGIgGWwDFkfP1jKFDbxe4/c1gFYhsxOLjKUwRzfErP5yWq2cFsrIIAKnNnFAn7ziw28ZxmnWa1kvkAFLmDnHxO3zfNdM6KDvKAENNrRj4Z0pBOwAaT2VKc/vTSmeYoQnGrapCQFdW0CAgAh+QQACgAAACx/ABoA9wDYAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6On+AADb2ecBenrJ2dtKQInO5eVyRUVENn4saGhOV1eONzcrAADNGBirKSkAFhaqAAAAVVWQJFN3LGawGTu70tLcDBbEEisxKFpoMnGiHkU/X1/TDiEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wANABhIsKDBgwYECFBAoKHDhxAhIki4MKJFiwoUCjzIkSNFAwguinSYUQDIkSNLbuzIcuBHBgpiypxJs6bGkCgvqmzJcyVPlgoXBBhKtKhRowsQBD3KlOkChT95KkQgtKnVoU8FUL16NWvUlgoZDBhLtqzZswmmVuXqFOrXt3AVsr1KQO7cpm7hGlRI4C5eAX39Hs2rl6BCBWcTK04LWPBgAYUjAxXg2GhdypWJEo7MN7Pmxp4DbC58WL
HpsowDex4tWbJdz5dDi4bcGkDn0LdX025d+vTp1LJZ19b7OnNs3Ltdg9atOrPwt719L87tPPlwzphhF6/8/Ct17surD/+PLh3td8fdr//c7vi47vHhwTcHDx9x+cTAkavnnd04e8HprRcfegMCaB1pC92H33kG7qecbO6JVxuDflF4V4BSJaigWfm95yCCEP5X4YHEFTjifATWtyGHFs6F4Yd79VdZhPRNaOKFN7pIYlwarjhWhxLCCJ2M7YmII3woGpjkiCr6+GOLbL0o5EBGzkVjijYuiaOWOjbpJJA1ThlVlWxd2WCWweUY5Y5D2vcllFxJOSWZXJnJJJrIcbmmlz6CiaWYAoZI5Jn86RmnmofyuaKfhAIK1qB+2XkknsylqeiGjN7p6KOCWkppdYZeJSdH5C0Kp6hsbkolpHdJ2uWn8nn/yp+bfZ5q1agw0kmXrremOiaiqIba66UKZjqpqh3x2pSre8JKoLB/EXufsa+25pNeFIW2QElpXgsXRQqsBV64yHn7FrgJpKvuuuy2yxi5upn7FUUK1Wvvvfjmq+++/Pbr778AByzwwAQXbPDBCCes8MIMN+zwwxA7LG9U4OZ00USHWYyRRtbepHFEKuH0MUkcS/aRyCMTEHLKJJtkLXziCpbUUpl5ZeNWNavlmc384VxZVj47xjOyBynLFLOJOqukrMpBy5St0RJNKqtWGv0Ykmk6fbXSJzIttWFUl2m1UbjGqDXZwA7L9ZZef21b2HWOXVTZBUH9dNpRF5p121/L/00U0qhinSffPJ49N953ux1jp/qtraPhnwleaeOKr8q4h3oPTnnTe2+uuN9DAa525pNjzrnmpn8O966rj3566UG+DirhRIMegOh5yx6r5whCPpTdW1f+9uWxY+f7bMfTDXbyiAdfue24J+74msxLPjvvfbe+rO3Kr8p89dMfCr7w0HPvq4Df0+5d82iP/7z2R5tv/e6p9955/dkTH6buz6r/a/rYk1r54Oc8492vePZDHQLzF5roFTCBsNufARUoQbcN0H/oO2AFSwRA/AmQgJaRX/iChcEMdXCB3+oYCImyLfNN7CfgipmB4FWdF/bkMDIcEQ3BY8OW0CtiQAyiEP+HSMQiGvGISEyiEpfIrx6ypGIsIwDGKhLFnZjMY1XEIstWlkWTwKQmYASjFlNmxcg48VE5nMvMBJBGtgwNO0GTmc5yppU2ciUrYnHSABgTR7+80YIr/JsISXe9CBKIVqZi3+GEZ7kGDpJ/S6NgUTwgAA+gCpGYAh7aGDk8Rwbyd+czoQb7ZxQHHOAAEhCABE55AA08BpPF0uQiGXnBAA7phPQjSgdOWS9WHuADaIPltGQZOVp+8naPnKAhI0mUDLCyl6zsQDD1uEdigpKTtfTgLUfJzKFw4JkK8eUBMnA4YZaHWs0i3zEduMkR9uqEGPAlNH1ZTmqiM2nq1N+fINn/NQWKc56sxMBnzCmdewbOmPpslDILWUgN/DOc4jyAA0BJUN8Y1HWqS6im+Mk21IHgoQKI6AEGak9rzgab60wmBBmaS3GmcpXiFChFS6rIYubTkyV8FC5JOZR4stICErCAODlQTz1eNHcZxakt17fTbgZABCL1JTlJalSTdu862UThNiXp1A9E9ZTSLOqbanpNhCpVm0zlZj+L4tCoTlSstSLrSc2qnWNe1apyPY8pRQqCV9LUfUmta04n09S1Vqeiv8ErSjV6LI4+Tq04Qqxpjiq9mwp2qf+D7GORI9np5DWUgMrqBgunWeqlqbML+uxwzugR0LXQri674kLseKgd/x6Stpd0l27dhcNyqZCJwA2ucIdL3OIa97jIzRdrEXKYMDpXJgwA13OnWzIzjnFkXNxidQtzsiiq7LofKyN34ZMAajJAIeUdK3z6eBeg4bYpfywRe9U4x59dlbzULEl64zo/nmo1rVzdqOr26yP9VhW0hC0tPh1rWsxuCr35hbB63fkXwK40l6OtnQAIvCIDT5iQGN7nQkOsUEByeEMe5i+F72ZhDir4oOo8sYJSnMj+OlXEF/ZvhpElYSfROJMITtZnW0zaADd2wBHe8IFtbNgdZ9bI1XqfjO/z41gGeWqFPXKOb1zi7E25PFUe5pWLNuTBCjnLUUayHsN8zruW2f/BooRyOqWc5C9b1M1onvOIdYxjL9d5ySt+DJG3ukwBm/jPH2aw+MyM5Rdj9NBr7nGNA90+RpM5zwtWs48lDWQmdxStTy60lv0caSUnes9cNrSL5ZxpSG/a1CoGMZ+7XGRRp9nVBea0lT29WVBnkNUwpnOp7ZzYMZvN0UjdcpP7nELZ7vbZ6tIvtJ+9XWz1Vje2NdByDRJDzr73L9suyA+TS+5ym/vc6E63uiEWboJAkWVTZEgX2+0S8Gosu2Ss9rc08sXphtHeFhMvtmC2M6WwcWdunq8b6ys0N+dxrAq/431txNhbr9rWekYQas2jWrr6B7a8brCvM7RxFnXcsh//t/Sxgf3oEpUcNYr1+IxUenGWMrtNf1W5g0R7cwBjvNUuzzmcQ5tSkFP6cINe38vJQtkHMvCyI9cpsiurnKU/6eTvq3jGa05iVfPI6tXEemBTPvQEszzZQT9w0lXFc1oT2uZuV7rQoy6mtnv97V0f9dfn/l+2F13ny5u60/eudsCrx+56x/us7y73wpd9Tn9/fKPPTnXsgL3p7UT5zI0u61QnvvFjXfuDI0/3Mws+81Xnu5NHr3Wg1xrujP/V5WOu+SJxXtEklDyZZy92QLY+2Khedtxlr/qe+/33LX993i1O+NAbHgD0hr5rueVb2WZbh9++W/TRNe3dXruGv123//jHT/7ym//8RNx+xqIYb+8KfN8mQRl2AZ6T95+L/ijB9/xja0aC18zg2YcUCReAReFeCGdsdVNHBxhxXTFx/IF8aKd8i/d5oQZ7FMh6Z9V3v/ZzwKdsn6aBjoJ4zKd4njeCPmeBJnh8Gbh6ccaByUeCwhd7GAh1ICh1lDd4MPiBLEh0EFh5HthrNWh2LhiBGtaDOHiCy7d1EliCSliEK2h8G4iCTYiEE5iCM0h2pTd5Q+iDXFeFU6iCNLiDQiiFrpeDQCiGkGeEqBd8OgiFLUiGHfh0WBiEpneDa/iDIkeHQiKCXxiFSViGVMiEgAiGc4iGWgiHLxiIMXiBIUh6ev94aac3S523iFbYiGooibj3Ts+HV6JniU84fH7ohYMYioIYhx90iTaViRW2iW+WhbniiIYIiXaIiWx4hm7Ig58og2/4h6ZohnkYi9ymQtpCffGiQtcXWQR4OOo3W95WfdaFftAYjdI4jdQ4fsu4jPKmXfzHXfiXEt0oEvY3L9+oE+O4Mds4cDbif/aFgGDDgFZhgHS0XsmIFQwnR+x4ePfYSa64clt4hBXIi4nod/DBipjWi4rYhqDYiAOpe/yIiER4kLaYkES3kPuYgAUZkKRIiX2okBTHkBYZialYi794i3WXjzS3hBo5irsoigYpkB1ZkYE3iyGJh4vmkfvhZgT/CZJlNYkIqYsT+ZKP2JAA+ZD/yJIYyZEPaJPeo5NzxZMR6ZMlS
ZFB+ZEyuZOqyGLPBx9SCYxU2Y932IWleJQ/mZQwuZRV2ZRXKWhZmY5AyZUx6ZW0SJO5V5ZYZZK3J5I1SZecuJb8sZUkWYdwOZNgmZItOXp+KZFjOJRciJI9yYhj+SBK+TYXSZQZ2ZiV+JjYkZNniWdM6YDKcZhQCZgOuZi+mJdT6W4qtIzdYozzSFGtORvY+Jrfx0PhV422eZu4mZu6uTDY6F3tN2+pKX/hVY4go2/3F3/uR5wQEY4Uo45CA4AHKI8L2JrxxSPuCF/16EeemZm56JiJaZSUuZJh/xmeuBiGf3mIiumPlfmU3pmG3XmZ3zmepAmRI4mY7mme9ome4DmfRSmf6umJ+Bma+umfX8mY7Amf91mI5ymLgWmVeDmXp4mPqOigcqmJkbmXkQmarXKSpQmhbmmWDYqWY7d5mhminLmZ+aihVXOXFbqKF9qKEVqXEyqiLYqVLzqZ/HmFJHqjnWmXOPqf5amg+cmgowmk8UmYYlmSM3qiJuqjPbpY77mRNtikIWeaHyqjUaqSU1qkBdqhFkqXKio2LDqYlimlR1qmWqqkWVqY4omk5HmmB2qmCbqjegmjV4qhYNqWtleiXBqXZBqnadoRqrkzxAh+1iebzFiMh9qMiv/6jLv5qJAaqZJ6m9eYmrJVqZdqqZm6qc+4fVrJlp8JqqHal6KKHZ+qTqdqqqWKIKmqqqT6qu/TqiUiq7O6qrUKq64aq7Y6JLTKI73Kq7sKrKgarL/yq+tjrMVKrMk6rLh6q6P6rLkarayqrIZJrY+CrAKCrRmirddqTNwqZN8Krtbarc3qrKoTrmSGruk6rpOhrjGCUu4KNvEqr+wqruXqq95ar+t6r8IKrdPKr8earwCbrfq6r/5qrv96sB80r2/DsA1bsO8KsQkIrxK7Kg7rZhdrlxQ7sNtasRbrsQ/LsR3LrAobsCJLriVrsim7rLp6sva6sgTrsi8rrQibPQz/i7Egi7MyOzUbC7MoS7P4urMGC7T9eq4eq7M++7MJS7Qqa7RCO7E5q7FR67CY2qmcyo3nCH9Vi7Vba21Ze5yTGrZiO7Zke1y9yX7vlm9fK45rSzHGybbXKJz3ppwPwZww5JwyA53xOLVRe51toYB7W3uCwU5+GrRJ265H9xmdGKR0GqNIy7T96aY5CqBCKqBDu7SY+6f1abkfwoeByrN865Sb2557CIsLGrFPC7Wia6Wnu3OmO6Soe7gzW6NqmaF6Orgc2rSZa7gP+qUxGqZxM6a8u7tFS7uVZrtkWbmke7k1O7yay7qw67pLKrWpS6+r66Gte5Ovy7mqK7uge72+/3ulwMs6rJixVYq90au902u+UfujXcpj27u8sQu5MZuWx5unydu4d0q93kuk6fm+hKi/2fux1UvA9ot0fAmZASq/3Uu/I3vAipvA3LnACPq9/Tu/xovAyKvAylvBzOu8IOylLoq/HCzA6WvADqy0z4u+3IuasjWMLqSpVnu1XnuMOmLDa7KMZbvDPNzDPtwwZwtvabt/Xau1Msy1chtwb+u2bXu3NkKderuO7FvAb+O3R1GdxSuHJtzCKEy8urvCSTq7WrynLzrFEEyhzeuEFCynFpzCiAu+byrGp7imYdzGXsyyvRvHdjzGuCu8WRzCX2ygbPzBatzBg4zBaRzIIv88uSo8x2v8uYSsyJIcuQxsvVlHx3ocyXh8x+t5yA3se5jMyG98wZ8MxpmMyI5syJCMyn/cyvS5yqWcyltcyV2cyJtsyqIsx4U8yx7MypNcv3mcy3ssy2Rcp2YczEaqy/C7vqFLym/puCl6u5GSu7cMyNW8yMk8zLtczNB8zBksmNaso9y8v96My9msyeLcx+XbzMgMwMAMyo/Mpo3sytf8yvI8yiM6zgMcss5cy4IMy5bcfy9MqDFMw0Zs0MeJw4dSxGD7ww790BAd0U18Q1QkxOunjQwNt0fstScxb0lcf0vsxPwBxTQjxezMyRlixQIIuCYtuNPsx7/8wG4smv//W7jbrM5lfNLmfL+/K80bCtP1HNTvvNMaTMITrMr3rMxCLdPfDEqLq6bxXMfozNS23MnQy8VYGtWnHMv0PNT/zMK0nNVILdW+vNTzbM/hu8/jaxWEC85dTdXh3KZomtSly8xU/LhNjTwSDCKhfM5l7dVvvaU17dbL3NfuDNeBjc95TXuXrNXCPNVnbdZwOrq9LL2GbdMxHdmAjdYj3NP5q88nzM8zrc2cbaNGzdeO7ddcndmKTdQRvMFHzcueHNAordlyDah0/Yp23c+iXduTfdVhvdbbA9SbLdlKbdVgXdnqe9mEbdyk7dz+u5+q7cLP+FYfZ92O4QAhTdEZzcTL/+g72k1W4a3DEl3e5n3eu9ndMLTdT8Te7T3RPuTegirfrQXf763ePbFeCLDf/N3f/u3fAjEV/z3gBI6zDHDgCJ7gCr7geD2rIEHgEM7fUPHgEQ7h24kgJH1wgascDxdrvJ2d7QXi9BXNn43Tdcp7Geu+mB3AoI3VpTIWENAAMj7jDfAAJpfiTypzLR7WL/4AUTUBBMYYOI6iUJrah/0oiJQAE/BVDQBz/NvORe3ZJbzjyr0qiNQAX3VK+yXk7ZvjLv3TrEgrCZDlpxQBTNfgX53WoS3c8UPcJFcWEUDmp2TjYTfkVKrjJg7NbuLjcn4AE3B1dt6nzZ3OLx3mZBHn4v8EVEIlTjbO5Xdtp2rt0ytq6GPB5770UuIE5HXe5USO54X+orSC6OAUUr4UAoDO6Xf+5ZMO6qgBUr5EAWf+5Ivde3z86SduFlg+6r5E55v+6Co+6JQr2wBt5Ryy5LwEUQdg5rEe6IONxsSc5/uLSRCg61ru5Mwu3UcO1WO91WADSw0QARFQL+AOATeO6oLu7Ddt63p+GvXiWddOoCtO6GDO6qZRAQJQAe5u7s1Oo7U+77duGhdQAReQ777u5SajQthtewnvF+OtQhvQfd5n3/Mt8fUN3tbU8LKF3hq/8Rxffvgd3xTPbfTNXCEv8iUvbiNv8h9/3y+j04l93IjN2uD/SuEVPuATXvMWThsLvvM8X84v/9zFHfQpTZ0ivnAC0OGL4vMyD/QxD91Cie1wlfQuv/SQ3dpxLdhQT1WoQeM0zus/ovROv9phT9uu7dSndRa5HlGwjhpgL/S2/fZWj808PS4ckuXkznRt3/RuH/dU//TwrvVjIeoipelfP/Vj788/X/U0nfUzRRaWzuR4b/h7D/NwT/m/ndxrQivGTubplQB5X/lMD/qKP6CS61cw3ufJXviPLvmTH/qXr+Y6RBabf0qKLlLl5fmsL/p/rfe8j/V/3/gDMO2XrkoRpey4v/rIn/xnrNecxXSzD1DVvkefz/eH39tVfduUnfll8fjQ/9/kqv/h94jmib/7vl/601QWFEDtp0T40p/71N/6ox//fm/+UT/mx07qp3T33z/a8k/2V//+AAFA4ECCAAQIKJhQYcGDBAI8hBhR4sQADSlepHhQwQCOHRscOHBQAMgDFDpyTLBQZcKDK10KbPlSZUyZC2nWZIkQp8KbOwdaxBj0IVChGDWeRNmggUilDR4g
TemToU6pP6lW7Sk1q8+tO7viJFo0owCHYi8eRdpRZNqOUavCvIo1Lte5dN9avQs3r0GyZs/29TsRLdsKAiqwRbn3a83FLxs7rus1MtjJjAEHjhgWc0UBGxFfqHAB8YAEBu4aOGj6LWoBqqWydv069f/p2atrV2VddnMAB5oD9xawIcFw4sWNH08gUvly5s2dP4ceXfp06tWtX8eeXft27t29fwcfXvx48uVb0z5vO73s9exj+4SN/v3O+Orn46yPW7EABP39/wcwQNMOCrBAAxF4zKUEV1pwpspkasmAAyfsL0IKJ2xwL5sEWGC3ABZAkEMPF8iQpwch228/BDrcjUT+WNzMRQ3zOsjDhwio0cYSc0qRxhMVvGw33wLbcUarbAwARwGQLJKgJvHycT/dhAxysydnzNFDJZn8kcEuHezxriH9GtOsKzXMcrctdfxywzDfOrNMseQs6sz9kFzTwzjbNPFNuaa0skrM7PQRzzT/A/VTKz559BHQQQUlclEj4TJ0STYTtSvKRrl0NNJJ5aqUS0wlG5WyTsmEFNVPFQ31Uk3FlNTJWK061cxUbV2Vrlb1nFWvV+Hsla9a57yV2Fwl25XKUi1bFsJi63xWKEJhTRbRX+VqFrJhod1W2mDvtDFPZa9VNFsguw2KTm+PpazaR831El4H0TUq2nS/LTTcQ98lN1NYpeRUVHadddfTfkk92NSAXR0YyIJVTZjZiJ2l96+Kx2oYsodxnRjFjs9dmNeMHdbXUpE/jhfleUMe9zT0HCgZ5t2Au6+m/NyTD72LJQJu54hoRs88oYcmumijj0Y6aaWXZnrpmmW6Gb7b/6Ruj+qnX4qavqmtztk+l3288MAB+QvbwD3ldTNKCcsO0EK2AZwWWBgxA/GguQOTUeW0/wX7br9cXHHEuEEtWWC+DwcWYDZ9zgxfagtnOHG9+9x0ccNHNnFjYydnFPE/WbYW88whP9nzcjmfinGI1L1X9A01hxZtyk2nS/Wh7K3X9dG13Ndg2hH+XWHLI9edUtJbljx4iWG1nbPmB2f1+NCTpx7bynl93nFgYV8XdVllzyn77IufivvWvYdSeYpB55d843k3GXnr1feYefZ9d793v8Sdfv7qT7ff8EqXv/hthn/t+5+/EigZ8V2OfPozywHx5z8KAhBYDSRe8SAoFv8JQox+QAJf6u7nQQKaL3fo89UHvYTBAT6wgJjpIMdUCCYUCmuEMiyh9BBYQQXykIE33Jz7+GLCv4TwezVkXb3Gl5eruYQ1MlPTQaCIGaB5zYq42ZrWcEeRntmoilhsWhjFOEYyltGMZ0RjGp3TxJVkDT9ZtBkc41g1LbJRJW6cox0XgkeoyRFqKnrbf8YWSLgF62xIRMjaCBkiACiSkNCji9/MUjcRteiQM9wbsAJnyRcJTnuEg58DLehDyigOe6LU3QaLEsMgLrCUiGSh/DT4wsCwMnaINCKtgHhLIapSKLbsHiZn50rLxLJ/syQixnAJy10GM4ehzGAPRznN2jX/83y9pOX+fHlNYjorl3Ax5g6RqcMJUhN43dSWNU+IzWQKxpDv/KYNBSjLVGYzgttcJzpByMx5HrOe7ZTIJfWZsgCeMpquw+dFgMlNUi5voCvrpzj/SU4SNtSb/DRoC8cJTY1K06Pn/FxEyzlRjtITpOZ8ZUGptER2UhSHFq3fQzcUzpEi1J4cTKgyhdk5mZqIphXdaBRvysue8hSmIBMpUEkqVFSe9KMpvaA6i8jEl8XMQ1/E2RWzCkbbdfGqfsSaGsU6VrKW1axnRevQ9KgQPoaVjnnsGhjXmpC2OhGsdn0rXFcTzysJ9KgELepU4snXXvkVpQ79Kw13+inDOtWx/1BNbCYjO0wXfrOxj0XsYS+62EldNrNP/SxmNxvYI2LTsvBcJmfTR1ojeXa0kzWqZmMKW8EKkS+nJWxuU0tA3O6WtqVlbQp5i0LXvla2+1StcCtLXNQm97a+Pe7Aiovc4D7XudOl7nKTi13A/na13lXuLHt73eZWl7vsOi9lo6tY85YXvGhyL2iNK9/Zrley9s1VemNL3+zid7+irW89x9te3ZLXttYNrn7/C1n/1ta5BX6vgh1M4BpCuMHAjXB8+dvd18R1q1zTKojl6mERf5hqaUVxilW8Yha32GhzLUhd23jXO9K4xgIoQI51vGMe9zjHB/FxkIOsERLXscj4Yf9AkpW8ZCY3Obwbvi9dhDzlHQOZylRuCHShvJAEjMbLHGGAhL+rqCtjGcdlFnKWDVzDLn95NAoQ85Mlg+Y0n5nOPVYzhZ3b5pM0gAJ/BjQFGpAWOGuYwVK+M57tnGgd5znD8eQzRx5AEkqD5CknKfSA5cJoHluZ0z8mi5YB7JJIJ2AClab0BJCSaebu59ONXjSnHX3hMW9ZIZGGAKorDQFMxxnBm351ATz96VnbesGhxQmfE6DrSnOAz6ze7juDLexYM7rYo+awsQvC5wgwu9KD5gi0EyztYA9b1qFe854l7W1Ut1nc4N3TtM1tbXTrubpt/kilLSABC6Ca1wN4N631f03mclc70dc+NLbZW5U2m7rSEhCABCo9AXf7Ot4Fl3e9H83mjuSa0iLZdUcCru1azxnjBSeAqBOe7JNQ4OMHobRJRG5xcr963gfXuMAHTvKBRHrSJAE5SS4dbpq7+uQ2zznP5bzymkR6AN0GSdAPEIFVF91HGc94ytN9b6gAHeYgcfrIFa5eRCM96ypHdtPT8gAIQEAkbR/6zA2ddmdhHeVon+9OnH6StSBG7EwPsMnNfvetv3fvHSnMYfxudVjZHelat7fhRwMa0Sx+7nmnjOOJnfSxk73zCUFO6JGzARvvsfR0PT3qDX7nm7Oe83odcYjpExAAIfkEAAoAAAAsfwAbAPcA1wGF/v7+RzyKAAAAAICA0M3cOzs76ejp/gAA29nnAXl5ydnbSkCJzuXlRDZ+c0REL2dnS1hYqikpKwAAzRgYkTY2ABYWqgAAfz8/AFVVdyxmkCRTsBk7u9LSxBIrMydaaDJxoh5F3A0V0w4hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLCgQYECBCwIwLChw4cPFyBIuBCiRYgLEh7cuDEhgooXQwbIKOCjSJEkOaokmJDBgJcwY8qcmcAjyJMYNa7cyZNjQpwiCfwEelFnz4MJCRAtKkDpUohGjxZMqGCm1as1mz6FKkCqV55DtzYUKkCsw6hfASQ123At2wBov1K9SjdmVqds46bdq7bsW7Jv4Xbl2xevWbd5B/OdW7fu3cB6CXsNaxbw28hHER/WelnxXsaNsWoWi1lyT8piLSeWPHpr66elwQqoGlo059WmF/tlq3oz69ubDZP2nBZ0bZmPO+fWHbj38N/CXQN/Ttj4cZjJcS+Xu7syaunQIU//B1999nWar5fG3t6xe+rvsIlzjx6fvnr5k82ft5ue6Hr2BsG3lHPk6Waff+PF9xt
t+72UnW8ASiUgUQQqWN6BQPWXIX4S6tfgAA9SF+Fp7m1V4X3hdYYhTv+pZN15IRY44k4TAnUigikmtuJJLfrkYYMxWjgjjSU+deOGOQYn3oIfOqghixwOOVWRA9YIZZLD7ShSj+0xCOSTPEYp5UBWnnTklReKp2VIXCL1435Bojimi1RSWOaWYsq2JlN7WtRmgG/CCCaec9LZ3J1s5klkn1wx+tCfU3oJ56CJFurjoXUimaaKS5YnqaAJymkpoJh2aqCapn4W6HVx4kiYAXwZ/4CoRQsoMKufAsC6l6yz3SSdAr7Gp2tavCqQwLHIJqvsslkB29mwX/Ga0LTUVmvttdhmq+223Hbr7bfghivuuOSWa+656Kar7rrstusuu9B6VSwB9NZr7733IjAvvvzia2uusSZkAAL9FlzvvwMbbDDCAedKsML9IvwwxP4KHOtvwRIlEUVspVSeSWaRBLJYHus28lYiZwxUyaOylClOZ4aJpXSOnqXoSpTymWrLCL1s5q1czVzfzvOhqhzPLpd69KmcLq1qzW2F6irSPSut3dNGX12c1BtCzRCk2wH9UMyEbqoj0fl5LZjaYC8ntkNkV2q2kk5vzTbXaFLdl9UQMv99dt1FN601z2+PVfjXN9N5991C38e43nv/dbhgjSP4uN90D97y5HEzVXnXaHe4eOiWcj552z2PDnjaWfdNtek+l415lqSTqLrmo8Jeu56ti4i14K4jrfvqovcuo93GC/l67CF1juvnUF7++9+4l878Rc4HPTftxGeGd5jSC3+9Rdk/mriPtwcfOPXqbz4+ROXbDD34uy+avKjL8+078sDvv37m7cvd+8Z2uvO1J33+Y13/jhethr2lVgWMl1SKpbINOSsxEjwKBTtzwc1ksCfSepcIR0jCEprwhChMoQpXyMIWauuDPNkXxfIlwxkezGK7EtjEbEgvifHQXj78YQ//BcYABRjxiEhMohJ1KMQhAmxXGOvYRBTSMdT15WRPSVkVS1LBk5DEJU3KChaXwjLx6Y+BCmQf9xLzKVbl7HmQG171bHc/y4mnjcdplab0JscAeg+BNINIAw5AyEIScgNcwWNt9Ji3/ElugPLbXiAX2AFDWvID5lNkaBgpszhC0nCfRNz88ITADFjSkh3IZJNA9Ebt8TGUAYhf1EaZKASG4JSWzIDNNNkYTsrOkbwpIC11BjwN4PKUu1ylL+X2yjMqj39qnCRDjnlKDUSNl45ppfkgF7lgwtKK2rTZ7TZAzVM2AHHYpMsyPedJZ+IPmgBcIwgsaQEJXEsCHkCnMsM5/8t2PrJ+OPseKVtnSXtmS59h5Kco/elNgCqujqBriDELWU9s5VMw6bRN+AgHS1kudHbSjKcIynkAXV5znwJlJjC9800DugmQQ3PIB8qZymQmNKXsbOY/u0fHBcbUIeQ85jlt+qF1wlGnDeUp73zqOEEeEwSJROlG3efOqU0vniEdTkatYlRXrvQ9LR0mrmB6n62iB6dH/aqJhCnJn2I1PmZFjkIpx1CWOhR9EI3eHaV6143AcCe8euC/xPPXlWyQjV0MUwe1yqzGMosqicVTYVUSQhda9rKYzaxmN8vZznq2WpPlSLGUSFrSMmC0pU3tEXFILCY2MYhCZFgOHdZEAv/A9oeyJdZvErBKBiSEt0UFJxe3OEaNCbe4K7PJFrn521X+C7hfcimgyGpVeK7Rj4VqbpOee1OxNqqvL81rJ18J3QZxN7jSnRJ193jV6yZQgOXdz3mj613zTbV4TK0uR+N7nvlOKr0uW28jrZtVNMLXudqlb1ubqlT75Ze94uPvdfwLqvqKE7zTFe8vI4xgAUh4kcIV8HjbW+Bn7rfDH95kiDWsUgK79b3WS3FoKOxGAJMJrd9tcEBFvOETbzfB/7Vw1O77RxbndHkybgyN87jiBw/4f+41cIxR3N0F21HHD3XyiDn8Yw9XGaQvlnJPo2li9yW5LksGsY1TZ+S0upj/wXOcE5DlO+caCxlxRB7zW9/p4w+lWcVrLkybvfrmK8d5THWecKLVfOe1YVi9g96mJ89Mlz/3sslk5nMa96xfM1MZvY2eK6Y5DeEGztaxqEbWc1ONatZGC7IcjGyiQutXWCP2WQ37rK53zete+/rXwBYhrQ9SQx7qiyq1ze2rafta18bW1fIiomqn7WzcQnuCUQzZFGVNq+NyOyfDDZlwwYhe5OKkjBytaqk3HeUydyiu/MGxpJFqVyzjVcs9zg+8sSPqQP9G3U9md4k1/W6+2ntEfYSxnttNcO/t20nyjiS9wfroAEda4qp6OCsj3s+JrzWsVo7ooQOq8a7OW61G/2IrmOGMXdmUvN/M7ajKSRxmdzvc4CMfUsLFvNRMd7rgN82znGUO8pUbuuVEejnHP4ryKhWd5ixXuMtxjnQp7dzmCx/4z28e9Iq7jeheZzO+W8wdpQsd0WA/eHjHfuStmT3sprl6w7Ne87lPvetqB5Dct073qPM86VSXOtoBvmUoa33dQEfv2a2e9pzf2+eI57ri4f7vnTr+gBfveMYD/3fGEz7f+IV8wBMf3cVT1oEdGyyuZ7vYX32bK8M2SLFY3eperb61wc697nfP+977HoWxL0ixbXjs2ST72hqsNg9vu3zkg1D5NmR+9J0fw2yTbNvL/djrHaJFcQdaud43t/8XrVj5pF5+7aIvvMDrzvehf57s6/c71ntO6tFb7/1tNzz7I9/3owve85ZXdVmWfqBXZGznZukWgP83gPWnfqHXgAWYXY0ngI8HgfD3gAzXfoOngJ3HgBnIf/T3gfYngfiHgPHnfx1YgSLogPfHgfPnYAR4gQYYg/lHVS5odzBogTWIgYc3gu53gxq4Y5nHdIUmchSIcBO4gCrYgyzYf0aohEhYgoSmf/KHg0J4gFNog+Z3hOingybIg/vngxu4hVDYhSsYgSHIhGg4hvV2fhmGhSdXhHqVd+yxdyCYg2cog044h25Yh0mYgpgHhxgnh/RDh2Hzhy94hTT4hTPohVn/KEBSGIdUiIKJ6IFqqIfVN1uCFUEN03pwtX02E3wEcVib4Yn3IYoDUVm/t4qs2Iqu+Iq/hooCwSuyCADDN0PKFm3M9my7aG1PhHsJ02y92Hy/aGq6YX0o423EBYoMgW75IX4oAX4kQ37lUX5c+IaL+IiNmIc7aD3WWIbY6IiSeIJPCIgRIlxw12+mt4Rh2IRD943mGI7cyIh7WIh9eIjwWInsWIVBaIntuIaMl49W6I/8eIeKKI6DyFEC2Y/7SIkD2ZDlqI9IuJAGSZAOyZCBmI3jKEAUKYYHOY/aWI8DZYhf15HuiIeX2I0iWUuUV43lkY5L52gkKXYamZDuY5IA/2mREfmQGYmQmic+OImJKPmPQvmRKUmP7/iSMylogviTk7iTGHmO3/d0hDiS93hjPFaUOjeVMJmVKpmGRPmVaBeUYmmUYYmUQ1mQHrmVZImWZqmWJ/mWF1mRM4KOS6mOXTmEdPVKbRmSYAmXOQmRfHiNwtcwtRhYt7dspoggi7khh2lrpciMiPOYsFiZlnmZmJmZ8NKJtVV8Cn
B8xaiLwciLo+mLlFmaxIia0xea2FYekrkxVOR9vwGNIdF90ziVtHkRtpmM/qaUZBiPkNaURPiUgwmOfhiJNgmGgKmVgmmPhHmIyOmU5FicwGlxwrmXTWcnVEmczmmc1lmT0gmJQP9Ilz0Jkhu5jUfpl2xIcXcZkzAXctQpkVIZncM5nd1ZnVipl9TIHOO5ls1plc+Zn9e5n6pCn9hZlSzZnl7plltpoKNmnsmJnmepngD4m/Ipj+l5nitJTFeZG3bon+WZoRG6oWPVkvxpoTxphiIanhK6nGXZoP0ZlzoZnymKoROqoT+IolFpoy7KoP+ZoB0ad4hYo8EJnvWpnHMJovMZo4EZojc6on+ZpDJal0O6o0Xqk0faolLapEuqo+Spok/KoiSaY0HalwS0nfYJoN4poEZ6oMa4bJvYUpTZmFBCp2Eyp5KJUXlKfYClmX76p4AaqJh5moY5W4RqqIWKqIoKjBf/45LH+KiQqiq/MamOKqmeRKmWGqlbg6mZ2qmbeqmVyh2c+qmaKqqhaqp8eaodMqqo6qmtSqquepOq6j2smh+1uqqzSqugWqq4yqu9Gqu/CqvCqpC5Khu3qqu+iqzAqqxAWaxEcqwkAq3G6qzPuqvLOq3JGq3UGlDSWq2pmq3eeq3YKq7hOqy2aq3myqyvuq7nCq7lyq4cua0+0q3cKq/zaq/tEXP0eq/uWq/9Sif7yq/LE7BuQrAFi68H+68C26wKm7Dk6q8PC7AICyj6OrEuY7AUa7EXq7E3VrENm7EfC7IRu7Dwqq6yGrIbi7IpO7L5yrE947EsK7LparLBWrLa/6qyJemywoWxU8KzKzuzEuizL+uyfSG0Q4uzRwtFi7psskiLicqoSyuaT8u0uSaoVnu1WJu1nDWnnXmLFJOLE8SnhiW2lEW2ogV9uIi2X2u2foWMWYR9skm0O6t9y5h9HpdyaFqz7RqzPLqlzBmFTPq3Dgu044qgHBqgHlqlXyqzNlu43Kmm+Jm4DjqVRlu08HmfF3qcgfuiJLu3hJuWfsu5gOulSsq4ntu4oAuVi6u5pDulLYu0lmt0qlu6rNuGiBu7fNuzlwu5mQudm+ujr5u7P/u4QHq7Zgo3M/e5EKu8M4q5ROq7rculpqu31Kuls+u6XWq7a5q0zNu5SHq90v9bu+wZpHMLu3i5lMcLSjBZuQ+6ollKgr9LoRJrvlO5oPJLpZPLvpS7u8W7vUKav3K7v7JLo1YKvdobubjbvcFruCWKvr55wL3bsQEcau7ZmycKwc8rwfRLwevYtpqYepwYtWHLmslHws9ne7eGQVWrtSzcwi78wunCtU3kmaB5qFB7w0y7Q6k5tfLitmQEt7epv/qbm90mwNlpI8mLusurxM0bvsOrhRhcwBosvFPMwPe7wAkYvYI7vTTbxWOKo+8KxeN7u+VLxTRJxhZcoPELxkt8um78vbT7xOKpxaI7uEzsvdYbx1WcxVG8unLsxY6bplusu3frdOs7wfxbx1z/DL90DLx2/MaAnLp6zL18PMb+W8YKDKYZTMli/HGHvMGJ7MiLnKN9PMkJfMdYLMiKTMhHDDNJDMk3a8ZMSb5pvDUADMr028GPPMeljL2jHMbV+8VQCswt2MhXvMuR3MZw7MusbEZrPMzKnMzzC3VSzMm8bMkIjMmorMnVfMqdjLefLMvtG8HWXMy97MR7DMuBvMzoXM4ghHoho3oqLMIlbMOKuacmHENsS2ww3M/+/M8ATS0yLEQ0LIz2LLX0fMKqmbbDuJqH6cMaA8S8KcQBTMTgZtERQaC2/Mxius7SjMfCPGQmqsbGzMbTnMm/LJfgO8gGjM3k7M3B7NEhjWcj/73RJQ3NJ73NKd3E/YvA6csQHuWm6kzMsWzF9lXT3HHL4mzERn1hDnzBLr3JMP3RqczOPf3SPx1LrxzTRC3TUbrSq9zSntyeFD3AztvNWR3U2jzU0TzTMlmmDxzV3bzWXK3SBOzHX6fUKN3MOn2lENrR6znWtFzW1AzWoiy+go3GhN3UIv3UJH3OLJ3OdZ3TqtzAcA3ViX3JTN3X34mlQn3NmZ3Nm83WPH24/pvWW03VyFzUxGvaPh3Xof3SdK3afWvYxyx7DTNULKXbW9EA+yx7vy18wT2Kw52KCuXbS4fclBnQzN3czu2nBz3CTVvcs0jdtmjdTpvQ+pzPfcrdhv81mwgQ3uI93uRN3rDiEeWd3uo926xN2vM6MOod3+KtEfAt3/Gt0dzxmhKdRezt1V1NJBjNfdI40XUl13jtzv7d1l9916aM2nlL23zt3k7ao7eN2OBM1ohs1rwr1Q4eznv9x279noWsnR7O2ZKdx2d94P/L0e+b4JQ92T/q2lgN2xc+2Ble2AzOzGJd44p944xN045t05Ad1ife3jA+4aF72DtuyBiOyzie4g1O40xu407+429tvFJO4k2+1ByM1Pmh1yaO4P9t11Cu43nN4p8N4SDu4jFu2ViO2Tyu2Yvd2m5+2lmOxA9u5Gpe5Ate5u284jcN2Ao+5i+O4huO1nf/7sp5zuZVvedn/NctHthxLtpzXtlkeruUydsfp+lLodw8LN2fXsJeg9yjjt3Pfeqonuq+F931rN3dzeoKHeqx7urf7eMSfrAMkOu6vuu83uv9/az1bd/pTd/Cft+1HOZTzehuQm4KRr8B3hC7yd/Hfus73egkonT6a78mTaVGK1xt5ADgHu7g/gByNdpWLeMcbutH7iafAgHHNAHllRXZrp/Tvu7Vvtp3hx0TQE0OEG/zPqD17ujJTug+IikOQFLQJe8BrO04ze06O5VekgAkdQARwG/mbuhXne5VTu2REhMUMPEHQO4b9+9tit8cv+aDTnIw8QAgfwDwPvILT+8w//vhYq7yL/Hx9CQBFnBK5K7wuSzz6Brm3r7yp2RPEmBJL+/zS83wgs6WD88kMIHzhTQtliTySk/zj+6+aR60T+8pdmFJVF9IFQ9xJO/ZJm/vES7w1nHwU58QhpTwv07mh67ila72b5IA+05IYX8A/W7xZQ/pW5+USDv0MuHueu/2hBTvcV/adf7aG4/2LvPt4D4t417uf6/1Z2/3AaxJ02Iblx+mkR6QXa8bmlQBAlABnh/zAD/zQg/xdYEBFYABqf/zq/8qsr7d0+3dZSsAHEB7jmXqun+2yX3cwK/qxn/8yL9ZsI77tL77y//qtw/9zS9a4F3s5X3eJWH9w37xev/e/cYa7Nof3sQe/uOtE71+/ugP0Suz32S0+CCt7IDy7M044NLO7HDS4VuO9QMvyX6eKCWH/wARQOBAggULChAAQOFChg0dKkT4UKLEiBMtLqx40SJCAgY9fgzAEeTIgQgVDECZMoGDCy1dXniQEmUCjRcRkiRJ4CZOkBlrPvT5s2FQoRgTFnUokudHpUsNmpSJ0sEBqlWpRohKE+lQAU496uzq9eDRrUbLMiSKNG3RpmIFtnULVWYCq3UPQJh71qhbgmD5liR7dq3QwT8L14QrNrFXuSkj2LU6IYFKvRDD/vX7N2Tgsoc1erbJeetip6SXNh7wAHJdB5Qr78QMmy//6I2i1
dpmi5uwgI6aTfNsnGDC6rqTB2jVK9tt5r+0JzqnqNuwdMS8NW/u3VzASZQQiNelMLMyAOVimc+m/jl96PHQgVr3DV879wHDrVqQYAHyZOSCL/M9L671amtvwOcMjC479BQUkD7vrJJAAAnsCu+4Aq8LUDEE3yuww9fkW/A61OyjCiEB7DKuv87+W648xjZMCkauPEwORAEZ1JC+1KwykTXXasTQxdJkRItIsz4sEMcXlRxSxwEoqKrHqiT70b8gWdSQRv+0XJHJ02zMMasoEbIKrypXvFJEIy3jcrQ1yQNzSTWdHGCqA6Q8AKu8PkwzvjZv+zM3L4GLs8mo/45zwAETE3XAuDPd7FO7QHebdLpBcfoN0+0OTclETsXjU7MMX6y0ulLVu5SkTFXd9NMKBKjgUwtDjQ1LUpHEtcYk1Ux1JNSiwqACDGRNwAC9DECoAVGT/asBhIw9C1kBoC1LWmq3svbYVUFytlCeuuUgAXHHJbdccxMwMV1112W3XXffhTdeeeelt15778U3X3335bdff/8FOGCBByZ4Wm0NjvbZhBGuVuGFr0Uq24cPhrgoiastEAGNN+a4Y4+NRchjkUdGwL0YT2UPSQNIZlnjiFZumWSTx+Nqgb8WKFkAm/laYOYiUSbwQwR2dqtnAYa+2WeaLYsUPaAPfDpBXv/VXBrIZW0dMmoOc/Wv1568JbRqK6+mmusVtY7Ra6bA1lRsNMn202w30eZKbY+2/dptSOGWVG5A/RZ06rj13q1pAen+GfDd7H6KbVYJz83wLBWfDnGjGB8Lc4KUXlrIpUbNmnJTRUdV8L4hr07yW3UlPeUaNQcM9rfevJBvp1sPmvXXTb8ddfVUD133LXE/UHbsyvY9NOC/tJxN4qWOz3jOafacJ9CZf35r4bvk/fDklbfd++3nzj7t7if//sDlwy5/xvaLlN5xX2mnFcDq237/yPFviz/+9GtbH/72l5vmwal/yPtfjAL4uPw5b4CLO9/qEsiVBc6vgNPDoPy+5r//ylRMKNJSVq1C6JZuefAnF8OWwxrGsBRq0CPdkl0JD1YwGtbQhjfEYQ51uEMe9tCHPTRhTVAYMRUSkYVGDKJGhmixIiKRYk+MVsZiNjKQHW2KI8vgBd/0siuKjItd7Nj0CkQ0seAMIWT0itG0qEWk8SxkaHSKGieowPChb3gPrNyHDji4Oe6ljhI8WwPhtKvoIbCPcKpg3gSZRT1GMHiHROQfHxlIPI5ud4Xk4yHvl5NNWnCRWwTlHk8HyU6C5Hrsq6R6tCjK3pESa58r5drW+ElWis+ViZTlJ1fpSOxBMpK1MiT5Uum6rvESlbeUZC/vuExKFhOTo9TkK60Xy7uB/3KWl9QOB31JTYOcUoDMFCY4+WfMb0YTl9W8pjgD98xWmjOZx2xmPMPJPXba0p3AzOQ89fk3bIYon3PkZkG8ycBh5k6dEKynHe9pP2mWc58EpCU5CYpMfELzoZSKaEIB2ceA9qWjgEmnPMep0Uli62AjbJEAUOoVGU7MpStM4kWkFUO8vbCJTPxhTnW6U5721Kc/BWpQ2xVTiyzxgzc9IVKTekScEnUiRl2qUyUCVSEqVYgh5edB81hQqOnSl4NsICO5Gh2sZjWazROrVi2pVlUKsqwYHav22EpMkUJ0m9aM68m8mlf38VVsab3oVudq0LrCdbCoA6xZA7vWwgq2sf+M5SheD0vWvU5Wro/9XmLtalm9+lV/nO2rK9EqWdAmzrMOLG3VNGtYzNJ1sW09LVjvOtq3sva1rlUsQEl7W8LytquxXW0CgwvZ3G62tb0trm11S9vKHve3qUXtWcO6W99SFrjUXe50a+vY6l62u3obLmyhK9vxhle80j2teXFr3O+G1rmQU+9z3+vd5HK3vsSdYHytW17ssve+5zXpS1soVYdQVYlWPTBTj6rgqEIRpjMUaoQlPGEKV9jCFyYYgQuMYJly+Kke/rAACjBiEpfYxCceMUJQvOIVm8TBA35xxBgwYxrX2MY3jm57P7siFve4xCr2sY850lwdL8RRspL/FQP0S9/cBFnIInYyi4d83QIeGcmcUsCSO+ufKEsZyl0+8ZT5W+VDOSACZ0ZzBFoTlSz3175NBnOYvxxnEot5vlu+c0OsrJrVxEQmbWZujehsYiAPOsW8IfJ/L3Jk4RBnAmzWsnvdZOg6z3nQdi5yjhVtkSM/iDhmQgmgtfshSh+61AXA9Kblm2kAOIou3zlACBwl6vSC8tSFNnSq/btr5RbFUVCC9QHWPABaQzeDt7Y0nXXda2a/WSjG4XOwD2CcYud5x5MuNa4vjWgqC9I4drpPhNg1rGpn+tjZTnacl+1sduP32TMhEVXEza5YlVvVTN4NspFNgETz+ieO8jRVvfBDb3JH2rRcRve++93smhz5Md/RU6gNfu3b6Bvd/O52bI8c7T7/eeKahjOlta1sbo/Z21EBNmQqlBJ7+xvAFU/4xRfebo1Y+dX7gbSb3T0di4u85NamuMtrfqgHQMDoR4eAnz2u85eHPNfpBvO6dz71pr/7yrJqOcPXm++Y+xzjJtf41bH+cfLyuOtP/zrQQa71RYv9U1mnOXKdvm2FZ3y858L7uTgA4qnyfcMMrirUuzxydf/ciQI+fLUCAgAh+QQACgAAACx/AFAA9wCiAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6On9AADb2ecBeXnJ2dtKQInO5eVENn5sSEiLOTkxZWWrKSkrAABMWFjPFxcAFhZ/Pz+qAAAAVVV3LGaQJFOwGTu70tLEEiszJ1poMnGiHkU/X1+fLy/aDBvTDiEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wABCBxIsKBBgQIEBFjIsKHDhwEIJIRIEWLCgxgxJiRQsSPDjR49gnTY4IDJkyY3PEyoYIDLlzBjykwwMiTFixlz6tw5cKJNihIV/rQogGfOmkMbIk26cGmADiijfnDIUqbVqzQFcGSqtKjRr2B9cl0YdOxHr2ALOk26dujSDFGjdqAqoOXVuy+zbjWLM63fjGLHljUboO9fAG1/Jra5dETcqBm62sV7Vy/hwmgPa0Yo1Oxgvpn/Lg45WqTWhhoex5VMGa9lwoY3Hw7M9fPY2H5Ld9RdEanquBrOTm49k/fN0LJzdxZMmynutMaJ7r19euGG33EbNK1LHGv0lciTh/9dXrs52/Djp3P9TncviKgXJCScn1CCh+3Du+dl31W8ZvND2bYeel/xd5Z6zlW3UFTy0ecgfvoVpyB1/s1GHlMCJiibgdsheN50qZ0Un4Pz3YdZfhG+BlqFol2YVIbnbTjhgB66NSMJ2B0QmXARxqQihSxC52KAACpGoFEcYlajkep9gN1crPW434waBjneZTDaKOOSjFH54UPXqaZdlFIO8OOAVl5JWJZGbnlZkk6V9BgI4KGo35lVpslTkTax2aWbsHlpI5ekcVemmXAeqSdBfIbkZ6GAgkaoaZPu
ZmiZeMa46E6NevSoaZFSV2lvghppZ3eZarmpTp129KmlodL/+GapXZ5KXKptbmaAZga0StECCvh63K6H9VrXAoEqgCxoyiabwLPQRivttFk1CxqxfxlL4rbcduvtt+CGK+645JZr7rnopqvuuuy26+678MYr77zfYuuXsQrkq+++/PbLAL79BixwQvamZawBCBCg8MIMN9xwsAIg7PDEE0NcMFgHJ0zxxgpbrDHHFFvMq4wJHDoAAwmVjOlz4yGwLFcLJOSyWTEryqkAM49Vc84ws7xqTwKoXCbEQveYVawJjnoc0l8G+vNRQZtM9KFHb5ao0kQxPeisTwMW9aFTr2wzq7QWijV4WjPJddcHpSy122Kn3eXZdMlt9tpsq/X10HBL/1n1f2VTireFdHdV+Fl5G9S3lGH77TOSgVt6+HZ2C+504ozuzfjiKT6+Z+SkTo5Z5ZIPjjnnETZutOc3i54o6aGbnjjq+qne+dhQuw760lbvLp3sedPene13sk627rrDflzymANddI/Eo2p87rMyDzjywLMtPHHR3zq919hffn312Xe9fWvdt/Y34eSLz36g1jd/PmXpU7Z+i+Gv2Hv+QDaPmObQm19lvqcR34Enfvhrn/78J8C71M81BGybAdtTPsjxD03++9/zUtdAq9xPORfM0/skVcGfddAqDxwg7sCnwP6NUFQlXNUJZZJC76ywgCHU1PjgF8NNzTAmNfRgBP8VN0HD9ZB6PHRftniVMmo5cVpEe6IUn0UwJh4rWS8b0MW+gq8sJsha1NmiUbRFrzKa8YxoTKMa18jGNrrxjW0UI0/wBbKNIYCOdaxYFYtFsI/lkWEe+6PDRMbHiDFAYIjcVx8F+bA9LrF3XhzKAhCQkEj+pGYy4hlTdmZJm2CydwwwWVY0mZRPMnBIP3kVqZT3OyUqx1bqe10GhQURVfJuhyR0pZBgaT9ZnhJLtETb/lqIQcDxEoJFRNwsUdmnYNZtmElcoIWOqUIEzo6ZjnJmf6CZS2m2iJo2tGbwsOkpbSoTlzDU5XjAKcRkUm6ZwCQnrLiZTm++UpS+lJ88K2L/y6zRU1bqLNClHOfO0cFzTeZ8JzoBas9d4rOgQ2RRQiMy0YhmLoeqMuZDxam9fQKlojeUIEZzpVGq5fN0Hq0lSFl5wCN6jZ0S4qj5UvqQfgpzoUlzaQFh6qOTXjOeOhUpMUX4zY0GNUgTtekzcdq0hq7TqAF9WlJX+s+cRvVzPIUJrv50UM9Qlalbu+rNsjolmUqVpg5R6jbBqjaxko2sLtkqpLrKHLSu9YUMdWFRTQrRkCLVrgxR6znxalWnChSuiOqrbOS4E2MRBlggZaxOuojFQEk2J/ia4hRZ0skuXTYjZISjaEdL2tKa9rSoTa1qHfRZjOCRkQq7I0tguzBC/2ZrkbQlQCBza9t74Za2uwWuI+8lo856ZJKVpJlFgUZKScrMuB0xpYWae8nnKtevEgUsWb5K2KbqFYRD1aE+gepWJHbzu0IaKVd/iVDtGpStczuq3tQ7V/Z61b3L5Qx9QVVV7xZzvO2V70XDm9HuhtWwZyUvgj+333nC927l1dNU8YvdAUcTvekhMEkBfF8BA63Bq+zvgTE8UwWT2IIaXq+B27pgGbpXsApdcXwjjMMU15fDdfWwfm3M3wdbrsU+fDF3E3jh/xL5vEb+aYBpLNQiExW8ThYvSk2c5PTy2MEyhjCQFzVhHf8PxLfM8o9PbEIhU5ilFGQyEcHsT/vmWP/N871yiH1cOjhXqMt2/rCcw3zkepLZxVR+spWjXOA+57XKHQ20lKGMZEFnmNAbfuRttQtZCrf2IJRlFnQtdWmDZJo6YNSiFVdL6lKb+tSoTrWq6dXpghzMigrIrW6Ha7Dfwja4t6Y1xmzNSFz3WtdclFFxr5vJTf/KujrL7/+o60lk96zC/lH2kBntZ0QLlM031aeMvHw1bvfVrGXedp53DGkVG7qwfw6yuLfMwnLf+Nz+dfRM151u81Zb3ih2d4+13TtvY3upYq4zu9Mk7TOLmMX1bnejFz1Oels73wsvNLUPje9w93vcX95zmwMeO4wL++IDb3LEIz1xdD/c4oD/87fGsw3vEZ/cxQ6veOtWDvCWI/zl6gZ5wmusbyzbfMYh/2vMGf7okZu75PEm+rx1jnN7U1zpEL831FFuIZX3fM4cX56XP55yjHfb69/eeu+GLvFBG/3dSHe5zHPe9aDH+ep8TvvN185laHPG6mff98+1vHNMW7HVBHGsZWFt7OOEOkGAH8inB3T48yReIKFdteQnT/nKW/7yZnw8AF4LW9nWRda9rXXE/Pjr0YMe2GPktSB9vXrUz3HYOqOkAAr/EOm2iNkh4SSxe4f74zp7k8omeyqnbXapl73oxif5OBV9/GvT/K57H3PTJWxmvCf/6MV/evMTvGS3WzjvPpc7/9D7/lfmKx/52j+/8+G+8Sl3n/xvBz/Woy9w+N+5+mD/N/TFz/fpExz/3qdn7Mdy/Cd9dEd95od96GdyB8hz8hd3y/d+/idy14d22ceAUwdoEtiAFJh+Crh+D9h+StZh+fd8g0V/HReA4oFnKphxA1hzBVh/E1h+G5iBx2OCMYaCWudxTIchxLeASbd9DIaD74Vj5WFwdJaC9ieAIUiAEUiCLfh1URh2PNh2bzaF+neCMaiEM5hdCWiBQKh2Nuh0GCiEGgiFS0huTQiDFxiE6jdGVvRYEDMrmrd4X0R74FGHnFVZ1zJqmPeHgBiIgjiIq6aHEfN3hWSIioiIiciIk/9Wh1xnIWM3iYATiS1iicqRQf+DiePBiZ1IiZIIipe4TJ4IOaVoiqKYiakoJJpYcKv4iZX4igJ1ip/Tina3ibGYi6Goi6PIi6p4SrRINsEojLKIir7IiqRYjDczjFDDjF7jjAVki9BIRNNIjcq4jNdIjMCYjc/Ijd14jLC4i+KIUtWYOeVojt4YjekoQdK4jnpzjkADj5whj7i4jeBojOP4i/mIjPeIj+TojujYj7UIkAG5j+H4jwKpjQY5iwQZjw05j8mYkM34kPW4kP6oj73IbxL5jRY5kBvJkRkZktckj65IkSX5kepojx2pkCLJjyvJkhgZkw33kCf5khOJkuz/SJH0mF812ZIHKZM/6ZIIaZMpSZRF6ZMMiZPWODKN2JS+dYhO+ZSLGJWiN5WTRohYmZVauZVc+S6GGGu55XlgKVxQ+YiOKJV/R3qtZ3q85XqNBXswI3t42BC2B5RJaZQS1HvR9XulFHw9+CI/eJdIeZFtKIZmGGRfqHd2SZhBGYZzN4YIWIOHeZN4uZRZ10otKHzNhISV+Y4mSYWZ+ZdEwpmD6ZGd+X0VqJhPeIVpWJGliY1JuIOhaYVH6G07eYtS2JqamU2kuZim+Zo3+IL7t5q1CXa3iWZGVIVVJ5lv+Ju+CZuX2VLK2SKJGX5C+ZwwWZiPOZmRiYZdWJDASZk6/4iZuimaw9eb15mejQmCqWmdicacH8iYgomdZOiG8Vlm1Tl/9AmS++mA7amfxOmD6Lme8lmgQyicWhiggDmg86meDeqY4/e
d0QaArdmT/dmBZdicZ8iaEgqRSumZsUmeHZocLFihuHmiyHkgXmaIchhZZ1mVL7prV6Rpg1dIXXmjOJqjOpqVXylrYnl6ZYmWVCmjiqiWfxR6RMqUgDOXDIFcs7d7HwqeF0pEelkRdemg3OedHGiZ4cmf2nmf0Ol+WgqZRzmlIBqdbEig+AmfYPigbmqgM2ecdFWcU3icBwem2fmeY8qdZYqlzvmlbQqniMmmqqmmYWqmqDmifkmbAv9qm5+ZojmIqBOanxBoqHn6pgdap3PaqHIapQ4Zok5oqXVHqSLop4dqqsGpqW5GpyZqp6Cappi6oayqqCj6meB2qnrKoVt6pl3apxAaqH+aq7O6q1KKquK5hZUaq4O6pxp6qYJ6rIBaqMo6qoTqntPqpaKqcK26qQvqqJ7qoWg6nNn6f6Qaqtfqq+eKoWSak6vKqap6mp8argk6rlZSorTqqvIaqcYKWnFIM3NYo2Y5pFzklpO1h30osKm3owq7sAzbsKrVo2HJeaVnlUIasI9opHnEekdKsJgFl5skl1AKr+Daq3nJpAuhe8l2i7tZTgz6rNiarmtGhIu6nMyKp9D/Sq9ciqwiSqyTWq0AirO8Kqlq+J/JmqW6uq45u6/oGnUeCKyy6q7b+q2uqbPSOZs0e7R8yq4iO7XRKoM8u4IUeq+Peqf997UkGrZma6FKq66G2azUWrNO66zBCrPxR7SlKqxQK7ZSW3BZqK94262durV8K7Mqa56b6a2Ce6J9W4RiirVu+7Iuu7SZarfmarTDirRBu7ZJ+6sGiLn3V66wGrlaS7Ixi6B+W2I+W7RzK7qby55NK62Wm7dpW6t7C5rlyaiA+66km7mc67We27NwC7urO7y46roZarP8WkhjUlfLyxQNwLGgBb2uJb2YRr2exiHP607Za4gO273e+73g/+uVMTqwQZqkCDtH1utq6Rt466t47Qt577t5mYQA9Fu/9nu/97srMoO//Nu/agu0xdqJCNO/BFy/FzHABUzAM9siJhsATtrAVwrA8bq7mVOlx4YzELzAygG64kq3AezBTEi5obumwWutrMu7IDy0r2vCJOy4yCu5xXvCiXq8cfu2LlzDkEu8chuna9jBsZu7UZu4kKokYoe7o4m4FDzBVJtmVkudqXu3Kcy1EqzCNCy8TwvEeivEZNu5WUuDJfyzUfy/xmufOEyuT1y5OnyzYWy7I7qyrhKYMqzEmlu3KwzGP3zEgZvEI7vEydnEG3zGIxzDaZzDPCzCPtzCl9vFKP8cx3vctVxotm7MT3A8yDC8w6lqyPOKul+suoLcyZZcn237wvWKtr/byFMsxWMcymU8yhycyYyMyq+cm21suLyJxEILy5Nbx5w8WVbUvALqy0OxveObsBQrepOTvcccv5EXvszczM7czMVsvhU7zTB6vo2lzNhcvhiDr7csxrFcFAicwPx7wOKswIVbu2OLzryXwRgcskOpx95MyaPLxyo6nd1sd/HsyWrMtFXMwjBHk7Srxa/ax7cr0HqMy/pMyJesy1B8ViQZ0Ac9uKbLuCMJ0Nx8z4urwXP8wfLcut88xD41k9+az5+s0KC8nY/7fw990RsdwgyNxv880hB9zyD/rVgRKbIkvc8dTcf9bMdUB88z3dKmnMooLcpCZ9HpnLgZfc4GTdNJPZ5VW9ARjc9BfcoS3cOu/NNCm9Mm3dX++dKBzHY4XdVrXNO3KtZAzdJWzcaQ/NRCjdAlXcknHaFmG83k+3ivZs0Fq813bUXJrL3Z/MyCPdiEvaN2Tcx6jVnZfNjoy9eIndjJCzjl3L/6izOTPc61ygCavdmc3dmezdXRGM6XTb/kPNr2q9HjAcEgm7IyEkp8dZsWDBEo+2y22MqnCzmI9UFvLcttbcTn6W124gAWMNzEbQEQ0FNkzc9kbMXLesPMDTUoMgG/UTS6vdZLXduAfMhj5SMU8BsP/6BVoM22Rb3KXuzc/vxWMeEAOSI01b3G182teDyFw5EAOXIAETAlt/ne7YrFszscD1DfB3DciZXfhIvdm9zQWPUSEADgB0ABJdPe36zfRii7pfw/k/Hf8CEBFxAXxw3hOz3Dy33eNpzIKS1Bk7HgDCIAEhAVDj7gtlrg8P3bYOffUTEfUSHgHp7QX93TuzziFK7ImTPfNZ4QKCEC+P3iE43aAmXbFO1QMKHeJ2HjJ8He4V26WH3biPzjJU5E+ZEA3W0SUn4ADgDeyZ3LPI7gVxzfJooiKH4AUt7iR167Et64JG7UemMnE+AADjAfei7gca7UML7fak6rsDQfNkTgSf/O1E584DCN3ndRAQJQAYeO5Ffe5H876P2NFxhQARgw6XIe6MXSy+0FzD8hzIXEAZolRYvt14SBzK0e2IUd67I+65jH2Nfs2Ldu63ut64qN67k+zK/He6Zdv5U97Kdd5jo+z6wo2qNd2saOE54d7dLusaW02rTt1rvdzikb27WXEK6NKZH8US374XJs3YSb2+GuUuOe7B5N7lQc4qSCIgnwAPRe7/Tu53GV7jU1yey+yO7ugpVeFTGB4XFx31ql72nF73Gt7Oae6F2OHRNw8LTMsrac7WY9K/kRAdgB52aC8A0BY5Ze1pSOyRCi4PU95vk+8W+87gvf7v0O4qpsGpP/4eUArjIJ4PGBpfA6/fLlTtR0fR6TAeX1bfA3r/KSzPI73/L+XshgTSZfLiIa/hgPjvPbhfRezfDufe4vId0oIR8rHhXf3fFGL+4Vv9YXnywwofFRTuRRYfNUT1FWL9dX7/ILfeZ1AhNt7uZsfxIoL/a+fbh57NQDXc/MMvBrLwAowfFF//e1HPgWv8W+ayMPD+Z7H+ASz/gU7/hmD/mPDPQyAeVhbvB58fYgX+VLr/QwP94yPxN6zud6/jyLf7V1Tt6nn/S2r61NzyNXYehYQfo6P/e1D/yp//OS/+iRXhm+H/dY/9GcL5ugdheb3ulYYYikfp7VHxKmbrHULM1P//nXf43qqe5EtD7+5F/+pcbr0evrvY7+06v+6w/svw7ZrjW/xl7sxk7ayI76PW+KzH7Zzj7sACFAAACCBQ0eRJhQ4UIAAhcEgBhR4sSJCxA4pJgx4wKBDD0S7PiRYUiRCkmWRCgQwUONLSFyFLDSpUuYKG2iFDhzJoGcOluevAlyYFCDQIMavSmQgM+fApYyzYiU6NSeUCfyFGCVolScQ6c29Eo1LFGlWieWNRuR61ebVdNiTat27NG5Seu2vdv1aVy0fPOy7RoXIlzBa0Ua/ojYo+KRTgUH6JuWMeCFbs0S9gt4ssm/hzsndlw4dGbKXy1rxSz58+LVjTW3rjxa9f9e1aVNZxWc2uzmhLxTwub8mvZu2cRti32sW6vvg8yLAu8N/ffw5cWrHyeLO65yq84Lehf6WvjjyMax201+urv05uyfi2dbvjr19efR51YPFTxY+LfHi6ZPP/dsy48p7gTsT6wEswuQKfnqsy8w/LSrLb4Bw7PwP78a9Gm/19Kj0Dz/MiTxNg51ehDBCEsq0KcDHbyQvxIVtPDEmVKEcUUWQ0StRRRj3C9IIK1bz0aXPL
QQRPKGXJAuDWdbUkfPlBStSbusxMvIprTUCMnbqCRtRDFpNHFJLqOK0YCvDBCogQndTKsBgdScik0B6CTKTjyD0nNNHHWSk0imAt3zJjv/BUI0UUUXZbRRRx+FNFJJJ6W0UksvxTRTTTfltFNPPwU1VFFHDbVQm/qsc85U7/ST1VVNRQnVPFV9tVVYS7q1Kyx33JXXGbPr1TMpQQuWtWKN/dXJZK8c1rVldX0W2jGBjdbXZoOrlthstZ1W2W6ZvTa6Y2Mbl9xtkf0Wr3CxTddaMt+ltl134133vXPNlVfYe/GF19t6v2MyX277BZdgdfeNUEiExV34t3LZNThchQVGN+J5/aU3438ntlhfip39mF+N6+V45INDhthkaTsetmSMXy5Y5Ytjhnldl2nG+WSWK5bZ438xRNnhhtt7mOGghf4Z6J1Frnnlngd+mueN/wNemumcnW56Zp1/vnnrrH2OGuSqU566aKKHtvfos9VOO+musb5aa7i9jhvsstFWOmyr6eZ7br9xtTXwWV0dPNePZOWT1sIFX3xVUh+HPHLJJ6e8cssvxzxzzSE13CPEDVUcdMITF6AA009HPXXVTRdoddddF0gBxknvnKHPT32NAd1357133/OuG7TXh0e9deKJVwpv4IOXUqAEBoA+eumnp54B8IQ8HvnSs389ebYBNhs756knv3wFrh+S++63V19178c2Gv7zxp/+gQjuxz8CB6g//0Ls23cf+wB4uvfpjWzM0xH9oheCAzTQgQ6EwPT6pzywDDB1xrMg65xCwf+3SUwAz4teAijwQBJGQILoe00GCShACxbwa1B7YfM+KD0HkNCGE5DeBL8nFBVqsIcFcCEC9/a3ZikwATa0IQVAOAAdyq89P8RgBoPYN7lVsYgzhN4DkGjD/UGviQaMDhRZOMApEhGGQkwYFiGwRSSC8IsxbIwYxUgADlLNbVisIQkvIIFGYYCJKLSQHHtYRiuekYpXBOERScjHRlXgj/5L3yDHCEBC2g2N8eMaFgcwAT0yUlEV8OMbL/kbQaqwkoY0o9RIpskBjJCNBzBh9ER5SOFJco51DN9xFDiANb5yiY+sYymluMEdyqiYr/nlALS4RQucEJIptKUk6VjMDl6Ea5cDUGQbnRnMaJqSmE5sGxjnx0roQWAC50TnBCK4TWpOsn1RbOE3xbk2cBKInOUzHyBvI8x4TrOey6OlDJOJT/LNMpVx7OYw/TnPcMJxRdckKP/0KRZ+klGeDj1gQG3XqgR01KMfBWlIE8CB0J2qpCZ1p/rgadGFiq52C7ldrE4aq4AAACH5BAAKAAAALJgAGwDeANcBhf7+/kc8igAAAACAgNDN3Ds7O+no6f0AANvZ5wF6esnZ20pAic7l5UQ2fo43N01YWGpJSS1oaCsAAKkqKs4XFwAWFqoAAABVVZAkU3csZrAZO8QSKzEoWmgycaIeRbvS0tsLGn8/P9MOIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/ABcEGEiwoEGDCxAIECDwoMODCxYCmEixosWLExciaPiwY4CIAjZ69AgSo0mTCxkMWMmypcuXCTRyHAlR4smbFGl6JLBQZ0ebOG8uJODz4dCiDoEGRSlAwcunUGMKIIrUoNKlF6sa5ClAa8GrWCse9TpwLFmwYTM2hcq2pVSqZwWkxUh2INe6AdCmNeuVr1a9YRc6bdv2LV7AWPHerYt4qd+qj5E2DiqYcOHIRSfjVNyTsdy5FjH7FK1Ts9C1lqOSpmn6JOeunkGHnnqYdmzZFCunfmr4Nm4Ar2v/Vgu3r+24wwHo3u2yN/LhwX3jXj2SusfWTAczd2v95+ff0Z///+5u9Ljx5Mu3r3R+HnrdxeKnm/87H/J3+drVD2BPP3n49uPVJ5mAmd0nW3rq8Weff+91Ft+BBI4WYWkGgobgdgoOyCBZ8AEoX3H0gbjgcBcyl2GBG3rVYX8kTsiai9VVOFeJu50oYYparThigCIO2COKLeaXIHlJyZjWfyzyWNuPNwapH0s2UohjVTpq2CKTFGL5InqoPRnllu5x6KCHEGoZo5nXGRlYl/p9GeOUSFUJpJKeoekdl0JiSORB2NHVIGwPWghjmnaWh+eT6+1plZqJkbWAAmPSZ8BvBgg2k30KXDrgpLhV2lQCoIYq6qikSpWpZ5zK5ulCrLbq6quwxv8q66y01mrrrbjmquuuvPbq66/ABivssMQWa2ysChCg7LLMNtssAp4m6+y0zkIqQKqgeWoAAtR2u6y123rrLbiULhSuuNSCyy2605LbqaY+JbQQvDqV1KJIXoGEr1b2BrhvVfrSS1O/uCG544dLCndlwtLJZrCVdMZVaJFcTsznoHeGqWKkB5fJcKAzYlyexYvCWZScTUZsHMlfMeqYyEWyTFCfWf2psMoh3oxwnTo7bHPDgspcFswXV/wxmaA9POfOEvcc9NFJFvwzyHsRvajQeblMmdUtY02zRUqnzPTKTofsNdcza73Z1EibDXXHT/MM9FxhS7mw3FSvefbZJvv/hLLdONvH991Nz30k21F7jHfbVe9d9uFiAsq43m9DPHbOhodVN5iB+/g45YsnnjTicLsdeumNV760z5F/jpWiXbv+suOZN9p67bOrLrbihedt+8aSi25675O/jvbQg4NHuuW8k4371rT7vlRdj3K8ablNCfziqXFhO1e02sfIvXHep7Xqseinr/767Lfv/vvwxy8/rtKy+2y09ldrLvbn5v+tuevyn7LUJcD/XYsBCkigAhfIwAYCsIAD3N+7HKUQhjjqa2IJSfg6ErALatCDKkGUVP6FFIKxDniyg57uslSbPJkIdmnrm07+xrnLCU53HBAAB9LkwhrBcGgypAkN/99EOOcRbyANOMABJCAACSjxABq4WA9T46Y0BXEkQ7RiETF3xABsQImseuIBOrCoKVqmihlT3u2kp8LTea4gGXhiGJ+4gTIiaj8/zNoVd2K91cWti28kCAjkuBAxHiADLTMjYdBoKI3lqI+7+6MRJ4kBMc5RjIm8IyMp5kgqQRJwNgzkJA15ySdiIG2KvMzx9NjJOH2yhs3johE1QMpCGvIADRhaKtmyyaK18mSvJGLnChQ9D9RSALc8ACo1mUcM5mR5fhzeJGVpSCY60ZCn1CUzV+nMiWxOmKEk5tEq+UQLSMAChgRBJkXYTLXd5JtaHKaEojcQESRTjIhcJju56f9O10AzktKUpUA7cE8l1nGdXmrnHjuSxTSGc55vo+U9c4nQNin0l34LZjwfysLTJTGZHpDiNpMntTUWL3duFKdndqkafi70IQ1tpDw7CsgCsZQ3F1UjCp+Hk3bSkz43hUlOS7pTNvaUnz/F1EhTuLZ8Was25QsL+Fa6Qe+MD6ilymqpLIUq7M3vq2ANq1jHStaymrVYDUyrWhkQLbW6dYESVNUDIUhAusY1W3MtYF31etfvJeCODFjIXxPaT6aQsCgdz
Fc3lfNBxTaWX4sd7JOsJdkhFRYjPmXqaZLKPNBUVj2U3afRUgrQ1JEWlLL57HZCS9jR1rS0oHstaj17R9b/WvSyF8ksTze7QljSFlG2taxrp4m62BK3s3NRLXOCqyfczoaz0TStbH2b3NoKVrRbvOFuT6Jbo95Eubth7gudm0HowtZ45p1tdYF73dZmV5TCk+5xoxsW8KZGvD4kb26Qqlnu8ne7JrGvZfBLxcV296RtnC44U2tdAQhYlcMVaHHR29sF/3ay7b1thLXrXf+ml7ppeXBbCHxGA/+3w0z5sIXXi2EHY3emLyJpLDmMYJyImC0kXqSJVbzRGcN3wku5MVRyDOH3qhTFmD1xjb/bYCELVb/EqXCPJSlh5Ia4yS/maIz7m2IpO5TB7HWxe2F8Ji4nmcdf9qxW1wwqyrJ5/819NR9X43JV+0QVK1Olc1XLc+elnO+sgA60oAdN6EIbulX1EyC0BANBArhLrtcKoAD3Ouk4S9VcCHzrW/Na6WtRas8QqSCoEWLiw8ZLJh7kUggJa+p6LRaeaQ4ojeO7pqA2Z6gnfGTwgJzg+Z73ZbbmjksxOkONxlq+Vaav8YINJVyPzqS0pvBpQVzrpQLYT9Dm9VHRLFP8WBvJNcu2lY2b7F9vjdmJGrZOdW3m3Co52sD+9pL9KW5lo1TBUw4ZuvGobqKy+9ru5jYnA7TvXpaM2EI0drd9fOR5Z0fe8A4KrBdO5Vlr+zQFdzbd/qleZFt83MuG+MXpXVSHn9nLFP/Xt8hBPj2OU1va+D52tfcp42eXPOLbRvnAvU3zdoPN5Sv2+I9Zfu6V21viQM+30BuOc4wb3dxNvfnIuzztoM+csDXfeL2hztuqK/3qt8065KRO9Jx7XeYh7znAf+5UhRepz0HJs3HqfL1OCebNcM5eV+1+6L77/e+AD7zgZZVo/y26KY1+NF4jnXhO+0/x33N8/ij9eEvjedQFkZcFHculVg8M1Zy/F+YJkljIQhk4SUf7vX3dcXJ//OhR/ze4n6tzX5KZUD6vyMR3znCIrr28tT/4uj2567J3PeYpX7rvZ6/71Cff9UOH/fFZ//KWb731MKe+1bNfbuxbn+zS93D/8Fu24ehzneSyN3nAxx9DIy9f/WwH//mpjnzeV9z83kf69avf6+7z3+z1Z3v+Rny5t18CJ4C9R1Pw13z7t32r538O2H+vN3/YJn/5B4Da93XcN4EX+E7OZ3+yhn//N30QqIHfl35NJ35n93wbKIIRGHsE+HsGyH5A5H4KmIImsXsIeH9MN3Unt4IgOHYo6IPrB4Q7GII9aHzoF4PMN4NGKHxaVmYy6E0feITKd4NESHtPSH4IRxJPtXeQRnc2NXppA3c4IXdARYZDY4Y38WeD94ZwGIdyOIfFwoYngT925Wl81z+dxoeVp4eQ5oeTJ3n2A3nmwyVqaEKC4nkjUXoA/1NqiQh6ppccXFKAUbaF7Xd73iF2lGg8lshYBwiFCbhlU0iJbmeF0JeE4Ud/Gah6nXgan3hgOPiDASiKr7gmsfhuWQh8mFiDtxgyuRiKXKiJI/OJlViKsriLTliLw/iLuIiMuqiErFiCruiMsxGMNMhKxBgzxtgi2NiL2hiFuFeKx9iEl8iMmSiOm9iNAfKN6OiL6liM5OiN0CiM6TiKUmiOplh8q0iLrciCD8iBI2iNauGO/xiEqfh+s+iMp2iLSKiQyniOB4mKBOluBkmNACmBLmiCFblfFymQL0iCIMmRHclYH7mR1SiSKJmRJdmQzYiP46iPySiNGGGHJuEpUP+FPWIoITtJITZZk3M2d2qYNT95EW5Ih0iZlEq5lHBYePlzeE5ZiJbnZ4TILpQ3iIC4eIIolYyXh0VpEQaQiKKWaqLnQYxIEpBolpF4ejrokFdIijIZjf0YbhY4kNM4kimpgu8YjrnGhAu4jBPplgmJhTRJl0NYmFq4lzuWja9WhYLZgqpIgUWomGzpmC/JgxCJmLxImS/lEDGFkJCZmXOZmIF5mVpXlyGpl6V5j5hJmKPJgKhJkioZmR2omhgJmifolwtJmrdJkQG5kripf7GZl3cJnL6pkbRplxV4mK8JmL35mL+ZnKmZg5bJmg/pmpLJm3jJkh7YgLJpm9sZnBj/+JymKYS6GZGgyJinN5PNSYXeSZz+SJ7W+Zb5+JfuOZzcCZ7GCZ3IKZrZCZvM+Z/OGZ7HOZ4Eyp8w6Er8KKASKZ/wCJPrWIptWZ6DCZf2mZ7g2Jhtt6A+qZNDGZSS4qFUBYaLx5QmeqIomqJj9ZUVoS17yD8w+qIyGogxqipc0iI3iqM6GiA5yqMleVQ7Kh89KqRBCiFD+qNJVqSCcqRLqqQhw6RImkFOWjVQSqVTuiZVGqWMdaXGk6VdyqUv46VI2k0mJqZbY6ZACqY/Sqbr2aZoehpv6l9ayhRqmqY+eqdEiqdGOqfuVqdw6qdyCqh0KqjWyKZxOqh62qSJ+qR8/zobhNqnj+qokSqlk/qKhlqpanGoSYqpW9qolJqnoLqni2qlo4qlnrpfnFqmqeqmnKqpklqqXwqrYbqqp4qqsnqmtJqrt9qSUKaqu/qnvxqowUqQlzqsm2qskIqsr3qqxRqqiuqsjKqsn8qsvcqq0mqr15qprYqpvgqtpOqtppqtAMCiFOGiNDqjWlmj6YqukZeV7aqi8Bqv8jqvvxKV7AKVjeeucqavl8aveDaVcVeV6HKVXEmuExGWFDQvZCmu3QohZ8lBkviIlfme+Ymo4Bqr8ciNElqdD/qsonqx0emftUmdFCuewAqyuLqNF8OJvxOgI3usKGunGbuynzih8/8ZrjF7shAqj/posx0brTkrrDN7NTXLsXzpsUgLtDursT1rtA2btN+6tDS7sSVboDoLtTgrtURLtfhpskL7sWDbmha6mwB6npqJrUFrsVobO1zrssqZrGkLs2ubNiybmwqai666rHOLPEVbtQh6tUqLtaGJnS9rmGbbnp3KsG1qjz9rnncLjXk7rXubNXUrnG47nXIruLOqslvbtH5LoRgbtoF7nWOLnj57tKMbtZrbn4T7tvF3ud+ptqsrs5OrcY4bL1/YPerargY7rgB7hiCKKR/qr1RJr8Z7vMh7vPaKLvjqlbu7r71rroEoaX8Yvb/bhmKpsKGnuJG7Xw/7EIr/mLqa47TWGrdwW7vl25ePG5fdq62cC7qbO4Dre6FPK76D67qSa3OwC5/nO7uAS5+xm7mnub8VK8Cqa78hW7h6q7+Hy6CJa74LTLqY278D3MAKnL8InLJDC78arL7AxKH4i7aie8AAzL8RfLsZBcITfMJZ67/6ibga2rVW+7UuLLsSHMAUjMLFpsI4zMKhW8PxiZ4xTMBea8Mk3MI3bMIYrMMJx8NKLMJAnMMVusJLPL6fe7M/nMG0K7ZUDMUVPL9k68VHnMUlXMBS3LIWHMLuC8FVfL9dvMYenMJ4q6sbjMXxG8c7PMfca4Nq/MB43MR6zMZiPMU93MZ2+8GBPMJI/1zGRWzAVswvuUs+z9uv1ku8Aat3usuu+5q8nNzJnkyHyysuzctXlgy8pdyG
13uHqXyTAisuBGuVq1yT2bt5k0jHimw83+sQjlhCQ5zGb+zHUezDCVy6Z3ufRDzDRrzIyuzGMXmhp1u/Y3zHXNzMYWzMvlzIg0zGyzzM9VnNqHfFjbvNHRzMm7ma4YzGYCzE6UvOA7qfHJygiAy5tjzNEeq5Mvy3NKzF/0vI9ezM5Nu+wKzPLyyyffzM6yzQyczM/ezNBg3Q0MzPPOvP4Iy60TzOCB3E7mzHlnvNTwzH7OzRSRzRDP3P83zRk2nOFP3Ix4zPCS3NJq2dGX3OhyzH8v+8x+9Lt317z++8z9rc09xMzaZL0jYtyA16oDu9hOlczAFd0Vsc0kx7oRQFPFFdFQ0Qy0Zp1WCJ1S2q1eX6Q1V9PF9dyZ881mRd1oRWyQYrvet6rmy91m7Nu5SCAHI913Rd13Y9KRph13q91w/t0xZNpduy14I91xIR2IMt2Is1y2vp0AddNblcE48lsZ15EJ+JzI7s103NyFNrzyt91C391+JsoDGd0ugcz+xb0prduRKt0xqd2aHN0z+90EE90X3t0kw9mwT9yw2N2q890K2r20JN1Evd28UpndhszUkNw41N3Bht3B39zawt06CN2bDNusSMuLs91Lcc2yI929H/Tdq2zdxnzN1PPdK0vdzUnc+pzbaczdFmLMzTHd4K3d1Knd3CXdvW3c3e3dmtXd2und7FndvHDd38Ld3/Ld8Int9AXd/Bvd3x/eCi7dzvXbbJ7cD4feAKLtsMft6MzdiMC95BMdXEJ+JFEdaT/K+nrMopzspg7dVcfbBmHeMyPuPzg9aaTMknXrw2fuMoHr3te+H+HeRyatiHrdeFXeSIfXo/jt4JjuFH9dikFtm8rOTcyuQQLuQB/tsDvuQdvqofvlhcztsA3twC/txhrt0fjaGcWavD3eRYftlu7tvX7cBnft9WHuFlPuGWWq1drrhfTuXZCuTqLd4wLeGNzJB8/y7mcZ7lc37Be67oVz7o813e6FnnDu7kb37SDgrivArpmC7p5L3ZF2rpaS7ojK7fSk3qL23IkX7qC464qn7boN7qZK7lZl7lfW7nf87mbU7r4+3r8I3ntq7nndisl57pwY7bw37ov2jspX7nyt7ofbzjbw29OX7JlSw0X63tL+67NP7t4B7uwkLtcM3jOn7tpkzu5a4qSL7XeB0S7W7k0D7rn06nRB7vcn3k+E7XiZ2wtCzZno7sGQTlmRexUz7ZW+GS/U3vAp/Noa7a5v3dpg7nwF7Om97LFe7oIC3rny3s0g7cHB7wDC/nqI7dDf7sHq6eCF8Qlc3SFF/vLx/tJf/vwPZ97CPf8TL/6jR/8qvu8BXfzoZu2Yab8X088b/u8TOv8QTu3szO6jB/9Dmv4SYf8mje80U92hhv2vQ77zhP8jqv9DWP8l6u8l3IR05M7L3+8xsP0ZS+4RLP9TF/87X+8QMe9laf9k9f6Hne9M9E9Xae8hk6sW+f6zbv6lK/835f+HHf9YZP34hL4h8M+Tph4uaO7eiOyi2e+WIt7pzf+Z4PK+qO45Wf7pev4qEv+uy+73P97qrP76zKALAf+7I/+7Rv9I567/iu763f7/kyltsrH6umYatK8KRn8IiV9TQdlxkH95pu1AtPsoPvJC8BAQ5Q/dbvAA9wa8yv98v/LvSvy/Ten0F58gAFVVlSAfhrPnxE/8sIkgAUUFAOIGzoj9LIn8fQKCQQUFBKFAHNtv0W7/wAESCAAAEADB5EmFChQYENHT6EKJAAwYgVIxJcmDEjQQUDPA5IcEDkSJIiJ3xMgFHjyoMqWa4kSMDizIYxadJ0+TLjzZkTBfC0mFOnQo4fBzgomVTkA48pCw7d+BQqUQEygV6sehXr1IVaIfr0+lAoVwBFPUZQqpRCApBjubqdajNszaxzBcKFalciRb14h5o9mtSCBAtJI7SVStagX51y7Tqey/il3gBg+yZWDBhtSQkCJJSk0FQyy9Ew6z4+HRkzWcqWUStGCDgw/0mCAkoeRgy75eq3qcNC/s17amu+r3WXFdARZcnaJB2gLK0xelSrqKurPg6A+M/L2WUPgECbIEm2ooVDnb4QuNf1WtMr3E75fcLvCSiMbC4SglGn3s//9Y29AN37T6f4ujvuuwE2OyC/tfibL7YCX2rvqgqBivCgA43TTcEBHoAAgtpCxA26CUk70bTrgltRwOw2xC7B5IyicYDaajTPP/9aJJBHC1PUCMbg/FMOxwEqEKACI/uTcUf5BvzxRb1ci7HDGY0c4IIKLlgyw8WApO5JHzEEc6EGpiTozLkaIMgA3Qwg6IME5qSzTjvvdMpN2OAUQE/F+BzzJjah5GlQP//J4rM2RRdltFFHH4U0UkknpbRSSy/FNFNNN+W0U08/BTVUUUeF9FCu+DQVKlTfbJPVPl1NVdVW95z1z1oRRSBXXXfltVc3Ceo1WGER8BI5HZts0oBhl80VI2WZHVYlBqaltlprr13ArgWIFSDbuRYoNtwyqfIPAW/DAlcAc7UliAEs321KSBeRtfJYKwO96UKePIQXJXkJtBc2cZ28DF+cruTPAYUXVrjEeNHkjkOBx6WPYgkJtu7JIj9CSi0aE/g3SnonDjgzQvc9OV+Ez0prKf5CJrNksgaW0eCZ9FV54wEmaFmk8kCCGWWZexs6rpQPtjmolR/oWaQQ/IX4yaL/0bN4N4xVS7oiwOxr2ueH7aJyyJFNnhrArLE6GyLAwuv6gJOAjhrBeseeueov0xbr6JsRvo+kwQpLii2Q45aYbLqJrllMjT9imqTOPivpObjBLq5Kkg83+moWF/+IZ/zGK6m8wSmP2HLD50Z9YrwdwvngjRlsEPSR9vt6rrDnTf30y+9V/DKdO47dtpEerD2s2wHGnOqyG9Nb6dXp0jkkkfITyeHRba9c7NzrXp7C5rX+HiudwZte9rehJl3q5AHsHsXn7wpf7ZWbeuCB2up/4OfivTpe5O0R/5/R3jeQ+OVtfB+50ZKCprL2maaB0ikg6yIIPSwhSUkKJJzpuLc+/+ZpTkAD5FeWtoSlBKgJewIwoVcMBSsW0upVLnzfoGIYJzzVEE+kwmEOdbhDHvbQhz8EYhCFiKlYDWVVtnohEouokyMqsYVOdOESXyLFDgZQeRz03gOjosXsqMduNLMi+7CIIi52sWJjdCAaIVjGM6rRjOQKYxXjmEU3epGNbzTWHMlYRzjqMY1+xGMbdzdI3RVyg4DcYiABaMhFMjJzfBQkIRVJR0k28pCVfCQi7TjJK2Kyk54Uoyb7CEpORvKSjvwkKkNJSkqWco+sfCUs/yjLNUISj14CIy0TKUpTntKVu1SlHHW5SV5ezJZvxKXd8lhMqx3zS3dE5heVmctgtv+ymrH8JTB9aUluZnKYo7ymK5MJzWc6c5nf7GU205nKbXoznLVkZjnVqc1usrOeq3wnPe+pznGak5rttKc752lMdBK0oM2M5znzuRAqrqSJp7oVRJOIqIhO5aESbahGLmrRispqiB8FaUhFOlKSltSko8poRjZqxI6yVAAFgGlMZTpTmsKUIDXFKU458kSKThSjb/LPtYQ
6VAaIK6dHlelNkYrUmPhzmsr0j/76BaGnLpWpL7VqTpua0H8O9EtSnWqOZJRVrWKVrDTd6kHlqdYuEgSsDphAXOU6AdqZyD9nralS8ZrUqjiVnG0VgFQbpxTrGXWvfD0sXwngV3NGlT//fVPK+XIz1sTa1KyJTetCwQnQebrVKGxrGVPEaqXKWra0BcgsZ4Wp2UB6tildIx6TSFtavWK2r1x9qle9+DPg9Yx2sp3YaWt72NTu05qqzaZrYdc0wRm2ssPda3EFalxs6hY55QHtSAbjqAsA12TCvSxxb8vWrnrVtdJz3KMq4N26gRe8i8XtXx3rkeweYLuM2hJ7e+Ne2o6XtQb9bzR/xrWeSW6ys31ueKPrX+RWl7riDKxRlhs4u1I2we9lbELn27mW1fXAwe0vhuPbWBlJFb2g0Z9+jcbfBMOXvLm1roTAGgH81dhhH/5uiPvr4gCvtceABWtYVUw1FtuWxw2eNOWPNxzWCiPYtiJ+sXxLzOSPOffJO84wW1PKULfa0MtzaikToYvXMZ9Vui7dskJWiuY3BQQAIfkEAAoAAAAsmAAzAN4AvwGF/v7+RzyKAAAAAICA0M3cOzs76ejp/QAA29nnAXp6ydnbSkCJzuXlQjZ+UFZWkzU1LWhoqikpKwAAzhcXABYWbUhIqgAAAFVVkCRTqBxB2gwYdi1nZTJzu9LSMChcuxYyxxIpfz8/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AFRAYSLCgQYMIDAgQIPCgw4MKFhoAQLGixYsYFQowgOChR4IRN3b86DEkR5IlJY5ECVElS4cmMcqcaWBBgJs4c+rUuQDBQps7g+5csHCmUYwLEQAVyjQAUQFKmzZ9GlWqUKpLrfJMmlUrzqdHw1L0KpTAQrJBi4o9upAA2p1t3+qMKxcn3boB7tZVu3Ym3ptmBfzNK6CvUb1yEb9VjNODAA9NGZOV7JWv4YuDAw+2fLkiZa2frSpucOCABAESSh/IAFeA27+hpXLuDCDzWdiFaVuMHdn1Zt86P5ReKED1AQ5zgePlzXR2Z9uCceverXxv9cTXN6gmbhxE8tfLry//zj299l/N0ssDYC6UfdrrGrYvNH5gQ073rcFbV28eL/rw/OH33W/6YWAcd/TdJx5aAirIH3S/BbjgZBNWVh19CBqHgV0VgtahaORNB2F65TXIoX7YgZcBhvPRd8CJBEao3ogAqmfiTTcSpiKLxbkII24ojvfgebfVWOKHsiHZm370nZYafRviqGRzU7YXom407mdjle8FyWB1BqpmgQQW0KeBgl5SmKaFQ/pXpJZHrumhnCCiGJ+Lxtn3Y3h0JtlmXf/BOV2OhF7HAZ6qeYdmjCSKSGR0Rg7KZX6M5iQcosgtCqSM5WWJnYR9LlkpTqThydqAmzaK5aOcShoqla9a/xlrl6N+OiOrqtJW6KyUpspnq6u6Camguu1a63i8ohppsIC+aWucx36ZrKa/5vocXguE9NtE02mkQFdffoubuOGRa5252HGrm0bEtevuu/DGK++89NZr77345qvvvvz26++/AAcs8MAEF2zwvA29VFBCCyWs8EAxdevSwyBNTDEBEa8rEQMKdOzxxyCHbDHFGdNWE7Y+CQCuV2BtWZVcWGHr3GULMTDAzTjnrPPOCXAl85W0eSrkltPuSayuDO2s9NI9T5rcn3IF+qyr0VIooQJLZ51z00VLCfVbUg8Nra9Hd9aw1mhzDWzQuC6LdNc6rm120mhnrba1lwn9JahVs/+5JdZ1M13o12iFvTfRfXt4deCCO+3grcLKTbPjRk9dLN2M63y329dGjrdhxn7e19mZaz445M0Oa/nbiYO4eOlbn95p22VPDrfskgIO+82b126Y3lYjTvbqc+u+e+/E50178qBTLuXtQBe/O87Ii+2o55zb3rqff0/PO+7MRu2s9ZdDD/3r01d/+OzY+z6683GLvhbp6YPPdvvMv2++5M0bD7v6wWNf6vinv+31Bn3Hs1/nBig/sYQue/3z3gAA6DcBik915GPd8DIoPe9RUHGouyAB5wc/BU7Of6X7oOtCiJZsje9L6jJZw1ZmIXSNJ4ad8VYCdsjDHvrwh02zIQz/y8OugxnxiEhMohKXyMQmOvGJUMRXyKZIRQZ4i4pY/JhEiDiyh5lkJSTbosREcjGIdVFhJcthAiTIgIWssX7Rm9zL3hKzurTsSHNsoc/sODPDvHF6Ifnj//pYwA2uj2qGDOB0BAm7QHqQkCTcXwPD8kD3rYWRpXMkHPmWyAoisloQ7AsmM6fJBMaxeZIMZSQNSCX+jJJxpRzkKQsJSks6sIS4dKUEY5lCSN4ylbakJC7Pp55XBo6XmWsaJ2uZv1V2EoTlMWbdkMk4ZQqPmRzU3jNXGM1duvGRs3QmNg9ZPlZaSZfeo2bgrDm2cSqynNvkXjfT+c1NXtM6xGwnPkco/xZpok2ddWPnJ/c5SbYMk59h8afWAJo2XwoTmM38pTnfg05A1tOUyySoKiUazwMW05sCUGjjMpoihBoUotn0I0hFqjSBwtOdnnypRoN5FJYqjaFac6kGYQrNgZa0oEax6c5wajeHnnSirSEpskwa1JWCU6nSYupMKhnRhDrVnvr86UYfitSnFROIYP1hIMNK1h2KUWMMoaGHhEghHF7GW2oFEVst5FbDFDGKeM2rXvfK17769a8Dc9jDGMaQMmLsrDIkYxm/aNg0vnVjWYzsGV/iWLvGdSopuyxT7iipPJKljjAz6mEEYDNwepZlovXL8lJKy5lW1aAoTKYJf7dacv/u1LWsJWFsqznbvgAvprfVKk1Hu9t19nYtv+2pTIX7WuJKUIXyvB4Dt3rUjraye+DMp3RFCNSpHrS7MqGfKbUbPrC98J3BXSp4kYK58UpVJsnlpk/VS13nZve9GIlvdJdL3+F6t7gBPa5Y9OvRrPa3uf99roDDQuDrGjiq672IeGVJ3vtN17/h/W59E3zfCFukwee8J3Nze0sANzSXLCzceYE7N5TatoNwrPACubvhDLsYvTB2r4crAmKKivjAJBamiXO64KP0OKk/hnCN2TvkoqLYgubF4Iu1yVP5Xq7JjZOx8vAX5OpWeb9Iw3JLi2yUI3v1wWrCr4TbS2E1fxj/W9rCTV37AtdxaZZKc16LDstK1hnKmYuADbSgB03oQhv60PASrMIIq2jKIjaHk2UJYxf76Mcq9mKTxnSlLYuyn/xMQqfVCmjpmNoMh9oqo9ZjOAdcWxxTGbdTRmVXHwdlFUvZ1bK2bojRbCEt05bLsW7tiIMtTljj2retZvGrh33sYjNb2b++MIJtPGsOQTXNOx5LspWbXiVjmL03hjaygd1sjn65wPP19rTzu20rdxvbS15zuLk94yi72TMa/ra8q+21FJPFcOXmqq59zOs53Vvb5Ba3s4FMbHMbW+GsTji9l83wgHv54ROPNo31TZ15u7vF/CYM4f
KXm6vtu8ySmWykuPBeu8hJuvmvsS0Xu9tSu40/u8eZu9fv/bvdVrvN6LvAYBvowrvrarvjlovgWBvu04t3orv5npFfCLu4O7vpdbvpUbvKzrvkibv9xbuOGLvf/bv/q7vY5KwOlrwBw0hlR7tVb7bli7tcdKrF+bwRq8wfETtqEytu2WthOcXwO6tvu2rAL0tv8Wty5Lv5EYt9K6fPfLmC4cuef6Wo/bsQJMiuwIXEa7w316u/O6tMtLvOPLvPxrsjZlnUacwOSrjz2Mrj/MvgtsvQ1btImrw317sw4LwHRbw9kas51bvEh8xL8bkhubxa87wFbMxVjcunILxJzIjUucw2tcxU8MuxKZXExMvU5cxnocxTg8xftrxsc7j3VMyAr/HMRbHLVdjMBsnMeRnLrUua+KLMl4TMngmsZw3MJC/LE4O7x+PMlN3L6IzBUz4rip3CsO0LYu6co3CctHKcv+yWKtbMscnMu6vMuEIsIUPMICKsHC/MtzggDGfMzInMzKLBA+oczO/MyuC8bHoV/PXM3H7BLUbM3VrMJeGc1yHMB7EcPUR60HNcOTeceMXMiBTMdAdcmATMqHrMTtrMbS/LJhKMalW8qZrM/ryM5b5c78jLEM7Mhv7MXz+81f/MlKG8pFPMr77ND9fMr/TM8IfdCN3JwMLb3qDM9kfMaVzLKdDLlhTMQavcgCbdLjeMN8bMf17MIry8kG7c1UHNHy/zzRIa3FA43RjzzGhtzRED3HEv1WAP3T4JzTQ5zR2ovJ6YzSESnIKz3UPh3VUOzPQk3RM33SmXvFZrLKpyUAXJ0nt+zLxDzW84VEt2zWvJzWar3WYh3MZO3WJDzMce0pEerJV12l2azNzozNel3NdS3SpyrORfGVxPLXOM3U5rrHAILORY3YMm3UoHyPhfrYjr24BO2QjJ3Qd63ZG83DVB0kmW3Rnb3USg3UNe0lpUrZpd3YWe3GmD23qv3OD53EAqkkqR3Hmy3abXzZHTnZuD3aWK3JPusWhh3aur3aTf3ZVRLasR3QrC3cRPvaCN3cRM3Zu63TJQnbv13ZkCndm/9N3VL9xx69ybbt23YN3Cmt2A9i3oBt3JlJRcXd0hX93tsS3/O93a3N29nI3oeN3Int1EHR1gkq13Bd4LyH1ri81gq+4Bks4K8KzAP+1hE+12vS18/MzMZi4XuN37JN2hGS1xpuzHwd4sjMzXbb3x2eFIKdiC8MlgfM0+Jd3ccN3VptyVaN3tad39jNnVDd0z6+zkHtwzfO3SON1AVM5Cqr0v6CnD/O0U3u2UEuxUPu3wzr2jZ+0+5t2Tt+5THN4c6d3FE+yFOe4s893sM9z1gu3xd91DudzzIO3lMd5k895l8+42Ye3VwOybP95B5+5zUO0l1+3tfN5gWt533+5lr/Tug8TueI3t15DuNOHuPhbcqnLeRpft+OTiVfDXJeXS1hDeEPTuAHfi1nTeoMfuqojqsOXqurzuqg7uqi7j0MMOu0Xuu2futMQeLJDOeSzufaAeIhPuK6jgDG9iwCtsIF5+WNrkHd3OLEUuzOAsGB3t5Je8+s5mc93uvaTum17WzYzuiTvudAXulEAe3NIu2GHtx1XuX6LWrffum5ze5bfmjvPu0o7udWTu+axtKYrrgvXWz1nu5lvuzsei3mPgDoDuniHukMD+XkPhQHn/BuHu6HTvGm3e1cEfF9bPEDz/Fg/vBCofH8Hu/KTtsZ6e77nu0NX/FxDvJBIfIqv/Ay/0/TGL8VMA/uvt7xLV/zWnHz8I7jdh7PPF8UPm/vWZ7pKD9hG5/zOU7mHz/05R7wCs/y2z7zF3/yxRaf7BMCCrHpXfLpsQ7rEn6gCI7gqX72aE+HrS6sF2ysa8/2by+rbzLsxozhdC/iJc/0QT8hwK7hwq7rJs7C1H7fK86Izu5Q5gya947o/57IOF/1VM/tWO/4P4/k9oyKJJ3UTr/3ek/f2jr16k7wlw94ie+dag70no/PwAv5oe/x/63cYl75VJ7okV3ooK/zrI/7ko/GP/v4K9/6Oz/5aG70pz/otb/osr/58q7ojz7xnZ/34w71sU/8/V7wqr+7ou/SSp5yS/+f+02/7rSZmI4b85Fv9ekN4Nw/8qgP/bv/0TAt8N+f/Qpt7aysmF6v6bRcoPnPqq06q2Vv6gAhQOBAggUNHkSYUOFChg0dPoQYUeJEihUtXsSYUePGhwUAfAQZUuTIAgI9jkRJ0mRKliFLCjjZkuXLmDJR0rQ5c2XOmzt5iqz5M6RAoSOJFhV5FClIpUsBNF0KFalUp06p/ryKVUDVj1l5es0J1qZYrmO3ViXbMi3LtSnbonxr9GxZtHOj2r1bNm5SvFP7Ft1Ll+lfrXoJF+YaeLBhwXUTH/4KOaxks4wfN7ZKWa3mzZYdXwaNGTBnt6RLe86MOq/o0aFTu179WTb/68iw/aq+bbs1bcSvZ8f2HRw4b5mKn5qGi1wu7t2/ibNVPjS6dOZCjR+v/pyvbuvTF3PvLVx7cvC1y0/Obn489POV2xf33jU+9vXsnTcXj3947vrk73dPD733OuvPv/wAHNC+/8Ir8LsDGeTvQfUkXM+46y6cD8MAbQpKKJyW+hBEn5AKkcQRiyoRxRM95KhFF1+EMUYZZ6SxRhtf7PCnFHVckUcBDAAySCGHJBJIgYpEEkmBFKhqRx9z5KkABKakskorr/TIwgx/bKBLL78EM8wGjkyyzCAFIgA0BtZks00337xugQDmpLNOO+1cAAEtDWvgTj//DIBMM8tE8zEG/wZANFFFF2U0gesAhZROAvZ8rM9IIRV0UCUFSLMuBRgFNVRH5/MzgwdORfUBCe6cND4MLbXzglRRXdXOTDUlslBPG62gV197pWDRUcu6E4IDjkUW2QnsbDXB0mClU4Jkp3XA1h9x3bTTzD5dVINpkd0gAUWH5epODr5F9gFmKa0L2jnRRfYCa7HN9jFuE00A3mM1GPfROqXV9wAI6mx2wcjczSDgA5al81Z6jeTU3kUrUFhcRMmtyk6FjyWY3cygnWBjD+p0+GFdt1UUhI0rwNffOS/Y+IBaAyiYQrOgfSDmmUum9+S77k1gg5hBuNjlkGM+oFqaPb4LVmORbvjah/+F9Nmve72NOdwBMHZqTgwQisCCb2uteb/RYIV5WgvC/pZhnrGterRPP+jgILa/FZfrpeZUKIJpOZCUab9gdeDbCATwO1l1+ZZ6aoi1/XmAusH+VoSiSQ3ga7vFTnbgOcuOsOl/px1oWoYDbdzxuLube/KC7k7WYr2RstMDhRf/XPCzNU62dGQzIDn1qVfX6t6tN+b3cmLrPFpfpXN3dUt3AT7Wd46Dd5zqiHdVFGt4tVa+3Fj1Bb7j6Pm8M+fqBUJ2ZsazP3N7lMcVGt5gW8a8zsLhZVX37twNQPN8B7h5wc8AxIuM8RBFAXixrF/5Yx4EJDhBCJwucOerlJ8mOBB7CT4vagY8oPwixygKlNCEJbQY/pZ3qT+BTj+Dw5RAAPU2XCFwMgoMlahcxsL+YbBdMRTADIVnMhFaLYdHDF/GeOgnFyLoh4D
6GgaECEIbmgWHSHxgWTy4ROhBiUM9ipJAALhEB9BQU1VsyUtCkAA2ttGNb4TjqLzIoYAAACH5BAAKAAAALJgAAQDeAPEBhf7+/kc8igAAAACAgNDN3Ono6Ts7O/0AANvZ5wB6esjZ20pAic/l5Tw6SEU2fnkrZKW9vcITLI8kVGUycysAAEpZWaopKaoAALMYODAnXKccQjVkZNoLGpUzM80YGAAWFm1HRwBVVdEOIh9vb520tLXExLvS0gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAsAGEiwoMGDBQQIEHiwIUKFDB1KHJhw4cSLFSNebJhxo8SOHjlCDClyIQICKFOqXLlSAUmHCl8ejCnTIM2aBG/iBKATZ8+aP2UqRLAggNGjSJMmXbAzp4CmPJ9CDfqSKkmrIbF61LpRIQGlYMMGgBp1qtSmXC+mnbhWYluYZ3d6FUv3KNm3M+P61AuUr1C/VQFfFZxVwNekEw4oXqxYgtK7hLtGlgy5slnLaA0rjcC48+PLmTHLnayWNFvTblHDPXz0QefOGpKK7jubNujQt0ezNvr69QSktQMHF5577/DBuyX07hwBePG/xwtH36q6Id68u5e/fmD3OXHc4EdP/6dsljUG7Z05dA9vm3178d6/6z7KmfEFCgrzK6SQwej40/8BGN9gAabGWmKM4affgv4NSF2BcEFoXXV5QbabBvYpuKAA/Y3lIHnuQfchiPBltlsA6Cn223olGjdiaRJW+OJpJyqnHQayzRihjjuGKJ+LJirFgXYO5OijdDxOGKNNFDJpoVKu9eaYkS2+B2SVVmYpYpB10bWkU1+CmaSMRxZ2YpdUXqnlj2sSOKaT5aEpVphlvVnQdXCW+WCccoKlkUwg1RSooCPtNCigheJ06EuLklSRAkX1mdSGlFZq6aWYZqrpppx26umnoIYq6qiklmrqqaimqmqpCrDkqqsIPP/66qwpKZAooSbRSqutuer6Kq9/MgoRAwoUa+yxyCYL0Um+ugpsUwVEKqlRCyCgkLTTLoDnnQIQNe1R2naLraThQsbAAOimq+667CYw1Lh9lgvVt0gRoBC9R23r1JmSzoVvAPoOpJAC7BZssLua/RswAP8GYK8ADS/sL74T0yuxAAQbrHG6CPPb58INPxxxk9x6LGfF316c8cYadzwyWSHfqzDJ+45sMpoqs7yxyzPD/K/IPfNJccJDQ7ayzu2iPC3IP8tctNAWEx210UgfrHS/NA8UM8RBczn0zV3mXHXSUqecNcNNc/2011GDXZfYY6vL89o7bf0y1Ga7TRfccaP/O/fU86Z9N9t5Dz7a0X3/bbbP+AJN9156i3X1x2cP3Lfck598tt1dzzdz5GHxnXjmOG8ueOeQ22z4XojHrfjSpjfuNOCEww46WKK7TnrYsdPrOO2ef716X62P/TrWjPs+++J42z78X8VXfTzlyX/7O/O1Y327Urkbv/vbvVu/POxPfv58YNEjPb3m1U97PfnNa3/+YOnrvH7p7Uv6PvLxU779pJXD2OU49r29hY9cvBpZsBw1MHhRDlIzW2BIHpWAClrwghjMIMIgODQJeqQiqwqhCEdIwhKa8IQoTKEKV9ipVjWLJbEa2AtZ8ixDLWuGLbkhDmt1K0T1aocEABaz/4BYQ0U5EE3Vuha+5JUZb9ErXE78FhNHE8VsvWuJTJOd2oCXOvOhri//Q0oBJXdA/Y2Pf9nz3/zMpLovyoRzjwNjG+P4lzDmq2zwawocuShHL9IxMHY0yhhDV8Y+7Y96/dNcIAEWwEUOEneFlNMh2ZfI0jmykXPk4xtP90fkZBJ7wWvbGj2yR1B2UXhuBOQn81g3TmpSlX58pSdjacqalJKVpxRlKmeJyk6G5JZoDGXhdslGWuLSlq6sZR1XGcxcDtOXpEzmMZdpzGb2sZey/KU0rUlNbCoTlt6c5kuAicg0KnKUkrnkJfNnyDOWU5jOIyZ11InOi5CTkua0ZD1pxP/Md+LknvirJO/WKVDwEVSP2/TnNXUJzXT2E5+t1OI+DfTQgOZzoBN1yL8WkMAIQquBM+Ng1Dy4kUcdsXQiNRtJMcLClrr0pTCNqUxnStNVuRCIMcQYEFFSRFwVYIg7FOJOg9hDYS2EWMlKalJ1SMSiOuqkdUmiAKBKlynupYrkuuITL3auAQ4AYViNVxaVt8Vv8pKh2aRO/Vh2P96xU5LuhKgz49nQ0qx1Z4/kXiTRNEmLwlN+8pTMXVuWVwC+la9x9etcAVvX0wzWanjk5jgTKteFPjOtgvXqVwsrxr12qa9uLagBD3o4zbYVfIf9bGJDe1GDZnRCjy3YaQ2Y2rr/gBa1oiUjaVlnWs7esbZ0uS1tc0vI3RKvt5FV6CYlGtjS0LO5jkWucZdL1tdip6KsLa1XZ0tG4IpFuN0lLiSniz7pWtcgAM3uYtUI3dTElmzkJUl6cdva0Z6XW+9dF3cJ6d2wgJe/4tVrfAuTX8wlt7LUFV9ZxXnWy5qVwOZtr0Ypq1jL0hWzdo1wYyUy3+HWV7f3dUqBCXjgCifYfaul71/Zu2G4jNhvvhWkZ22bYg+v+JwShq2GMTyRjXa0gx/FGFUNmFLYrXQiFNSgkjMIUiDbsKZQjrKUp0zlKluZUkrN8rEY8Cgte9mpE2RqUMWMw5768KdDFepOzcyoBGiW/wEKcfMAEQaZsMoJikMOi1X7YmckalWKC5PzAHkl6NFhsprKBSdaH+yRQveN0Nu92HNbPKFJ81gijo4bpOcsaeyqeL04pvRBMj22TV+OzgEG4IDn6Wkb44TUVTO1ocsXTsk2+MKM3giskSZr3R261om+NWMv7ZBd66zX3vv1ohlcTGAj+CXGZhmypddpRD9b2CwmdkOivbFpq6/azjaxoh3M7EZr1tv2A/eybd3sdQc7JNzWGLrZqm5ys5vV1hY3SeJtsHnjVdn2fje+w63emvC7YP4mLMBxXW6H5rvgMjk4uxIOWVq7+9rtDjjGze1Visu23gy/t8MJ/mmDnzvOkf9e+LBz7dxWh7cpEl+Xx8lmcY3rO+MhF7iuTy6AmJO45jnf+MgvfnOODxrlnFZ5tlnOz4eXPOI89zmMlR5qbV/X6a42+JK3XkFCc33rYP5gk6NWZKwd+SNjN1vZKXd2h4DwynCPu9znTve6Y3moOb3pmC3yZDSvmcwzZDMDj+plLQP+hYKfYJ7BItXFK2XPf+lzl/CMxQB2NemSj+pYFRxigZUYoznOy4s3+/mnT5a5omaSpZnu3h2zfsKot7rqXQ7gzIx+v5DsL1j+m/tUd3bVmU058O1JYYiPO+hFD37Sh9/j4pse21V/vY6F33m0xV76Vyd51qHn+oYT//reb7r/9l+u3eVXv8PkB7U+Q8+k28eYkbpXCu/1CvSVh7/11Gd/QdBf+xuvP/X41X0i933VpX81g3Xpd1z5B4AEwX+992HFVX2Ws4Cyt3/Ot33HZ38DGF0UiH3odYEJ2E1EZ3z0I4
A6R4CcZ4Cet3r352ImKHQoiGILtoEUhYD9x1sd2IIN4YD053t3xHz4Z34qaH0FyIArSHsPWH6n9n6bJ4MSWHr2NYQTKIRGSITZ8mMjFWRrpzlbWDptVxIK8HVgJ2Qe9WR2d4ZomIZquIYhpHc4lHdpFnYldXjNomZNxXeKQoe+Yod794UI4XhLYS1TVXl1BohIQXlbFUCZV1V/ZkUz/xZcNRaCGbh0Oph9I/h82gR+NLgaSNiDEDhe5weCN2hhGniC4neJGChfopiE6gd6VciEUCiJqqiJpliD4zeKImhzJJiJRViBB3iLrEiKlLiJO7iKnuh/ruiLR2iDwXhiZjSDtciJzHiMrehaQ8iDhuWDMgaE0giM1IhMtAiDp6iLmDhwqCiLvJiCrxiLESiF7AiK12iM2fiJAvaELEiMB4GNnVV/wxiNldaJ8xhRveiBJQOQ+6iNjMSNxRiOyddy0xiQ1RiFVaiPv4WQsMiP0VeJH8iQu2iO5JiKOFeK4shh8niQ9Khq9miQFYlQHFmOQ/eR6OiRyNeRG0GRMoaR//+njFFxj/6Yj0uEhSqlhYbYWV3IO35oECYVUkN5R0dZEG/HhlAZlVI5lWrYlAQhK3+Hhz4FVGWmh7qSeGL3Q33IlYEnhxgBGUsZAJAXGIsoFogIaIqYlm/piGQBGSn5kCaJjNZYhXbpjjw5kt14jrhYl4Nxl94IkcKYkfhImE5hmILZjJOomD3JmJ7nmDA5mNCXkwTJmJGImSHZj4D5j3i5kpQZGJY5ky7pkIeZl6VZGKcpkg05jqgJkq05Ia8JmrFpi4/5jbV5GrcpmaFpiZcJmb1pm36pkjdpke/Im8VpnOv4l7kZmMPJnM3JJL+pmRpZkKOZnNXpm8e5nfCnnAr/2ZqdSZyfCZzRKZqrSZrd6Zw6CYvjOXvguTB9+ZzIGZ4n+XvVV5/vuZwo+Z3ryZ3t6Z6buYwBip96KZE6yZ8FupP3CXKwSZNn+WQKJJRKWYZ5SIZDU5TgY5UUQZUgGqIiOqIz5YYzBIdZ6aEAkBFkiXjDUnhZZpZI5pW7QqOzApYlJZeCKJcX05Z6phCXt4RxWXk+ynhN+IzXyTvuR3XYuZgW2JK0+ZKws6Q4mYwNapMIGpFkRKXiGYpQGpNSijVcmp8/6KUDmZ2/SHbIVaV7qZNYCqEPtKZdGo9f6pkyKaZySqbbaKbq2J+XNKYJCmJ0eqZOmqZql6eB2o4TWZLs/5mYKIWoWqqobsqoAhqpkASollqPg9qnDQqLmOqoVoqmWkOpWQqq4POpuTibYBqDSAqgappybKqgV0qqcMqFkGqqsiqqVuiErnqosDqni1qn5nmncfqrepqQfMqr9hlSt5qqEZqazSes1Hmej2qsiQqPwUqok6mdG9qskdmk29qAP1meeqWiSbmhYrhkMop2Gkp2aclI5kqi8jqv9FqvpRKvWmlU+LqvQcavfdev0EInF0MndaInJKImA8otdiImBgsjC+t5BCuwAUSwA/uwBZuweYKwW4IlG6uxbNKxGAuxDSsgI5saFDuxIZuxbYIkJdsjHPuxMJuwAVOxLaskFv9Lsykrsi/rJjVLJjvLsj/bnjOLsj2rsiu7J0VLmUN7skx7s2cjsU2btAwbtAebskvrtFFLtQ6bszrrsTyrtSQLtiZrsVCLtWYrtV2bs1eLthfrtUDrtkjLtWsrti4Lt1ULsniLsXNrt1tLtzbLtvT5sDjLt2FLuGPLtmULuETrtz5ruL25t0d7tzH7tpHbt2qbNYNbuYWruYfLuEgJsBnqr6ELuqP7r6abofaauqq7uqlroi+Eoncouj5FulsZh2LZlfnKKDqqRImYtY47IUX6eI2YVY/4XeSKmHn7tb8rnKpqpzVJq4u7vEbrrLgpodGqrcE5vZMbt5n6n9nKqbr/mrnJS62hWqijKq3Iu72Sq7y4Kqjfq6z96bvtK6mzir6sybl1i7/quZvpO4vYm56Nq78BTL3oab0kab+NOr7cK8DyeaBH2k7QmL0Ke7bXqqnv26rrKL8EDK4SLK4IXKkMPMGKC50GDHv/W8J/67kiXMHeO6kfXKoKvL6Uu8HlG67ne8LQ2rnSu8Ldq5+bCr+dGr0hbKjNO6zP+8Liq76WO7/Y6sI4HKVLHMNR/K013ME3DL7m27ZDnLZMbMFOjMU2rMVSvLk03Kb1K0VAaWS0e2ZrbFQcSmTvuq5ux7p0XMd2HJWu2yyw24dtPHiye2YtWodyXBLmuruD2LsUvMNO/xG8S5HE/gvGVizGSkzGVJzD+euMEPyEGlzJUEzJjwzE4SvEY6ybQRx/STF//TvDk6zDXZzKC4zJcBXBAKy97DvK0hnKplwvx3u/tpzCimyg8Rs4SCzKq3zJnLyqnpyOoJzFjqzKmYnCA3x6T4zMrNzL0fzMluzL4DjNzjvFtVzM+1vKwszNRuzNznzOxBrJDxzLmpzIrczL4OyTw7zJ34zNnVzN0gzJs8zDWyzJ9kzNxvzJGBzMihurAK3NsIxYsgzNtIzOMpzO+zy1/wS99OzQqknQAqnPDM3P1tzQEL3REr3NGp3N12zRyfzRJO3RR0zO0/rK8azSFy3OLInGu//MlH0cZrnrxxbqZKWLunf800Ad1FOWx76yx7j7x/p602Hpd7EbyHs4yH+4RDtKiO7c0dzCyIc4vGJVvP5V0yBs1SEN1sA8na680iN9zwFt0vj8z/Srq29KzPWs1rf8rGi9kCxd1ubs0nGN0rmaxW9d0XpNvmbs1hRd1S/NrfwLzwKdye4I2GFavSn9pHet2HKN0IdNxHR90PI82Ql82Vws1g46n1y9e14Nw57tz5XNvJndzdd71poN0yf90I9dwJHtwZz91afdzHztvl+8zGGs27Id00Xc0mbt2+oM16ndwInd2YvNzo1t2GzdxGfs2qy91snN0cfc1n5d2AXttCT/XNtXbNwR/dm5zaRVPN67OtAyrcJhnd3SratFokXx/S0OANVIad9Oid9Xqd8f6p9HUd/+bRQAjq9CXeAGfuAqhNQ4reALrtRzmNMNfrqCggAUXuEWfuEYLhBDgeEc3uHAndfBDSA/1eEkXuExMeIlTuILY8g8ityB/eKSgdXgotV3ts4K3c7dfawXOc7UXc6xDeJALpur7eMHfNumvdchbt27rd1h/NfQfd2YDdl1vdk9TtxBruRJPtdS/tqSXeV4/eNgjuXCPeRW3triDdLkjeRXruW0PeUbaeQfHuZpveTvvd3z/OQwPuZbXt0m7OWUnedybtnR7cXTfebgjdqA/y7mQr7nRN7nhu7m2K3mga7ajF7mRe7nzA3laa7poe3Aoy1/pR3nij7qbM7B6O3kOc7eYz3cX37pj87l7S3ppB7OrP7nxa3euIzns23qaJ7ejJ3Buq7nbQ7r4Y3rzOziWU7rZN7qDjHfCubskjLgDs5SEL7UQRZIAI7t/L2iCN7t3v7tp8LgDy7u4z7tM1rt5S7hgJK4qo7oyV4hKJ7iHH7i8q7iTyu4yL7mgl4YMk4tNO5n9462oj7n767cZG3r1Qm5oD3wyl7pzF6bC
l/eJ/vdkM6ZmJvvkw7bpFzrmS60F+/Y+k7pw87nEP/xwZ7xiH3wHd+dES/rBB/yBs/xuP8tsyaf6r/c6cs98wPa8pzO8CLP64fO7jfv8zG/7AjfnDyf6C+P8lE+8o1e8ic/6yVN54Suq0Lfz0Sf8jJ/5Dtf8+2e9U0P9BVPmElf8JHe8wG+p0N49QuP8Rtv9Cuf8F4/9G5f6ufd62wv8d79oAGv6mC/6Urf8E5v6W5n7nOM7tRO7ol/7f+S7Y2/7U8J7pI/+ZS/IIrPrpeP+YZ/+JnP+dBS7x2u4d0C+vNe9/se+PBO+hhO76pv4Ssu1bwLlyDP9J7X72r575Nn46q10If+9+4u7GJP7L7u3MBu8+9cpj9s7L9t+lO/63d/6Kj+9cxf9A5/9K6u/Mc9+29f/XH/b+bYj96+X6vBT/JU/urkr/FLL/XUP/gPX/7f3+vhb96DbedwPv1nj/rrP/5P7/6/jtF0b9AAEUDgQIECBBAgmFDhQoMAHD6EGFGiw4UVFxIwaFHjwIYTPU7s+FGkw5AjP5Y0CfLgxo0GEbK0iDJlRJgWMQqoWVHmzIc7eQLwyTPoTJc5Ga40mnBoyqQJbzYluNSk1JFURVo9iRRqgKJbuQr4OdFrgKdesXo8qzKsxLRstULturUtTa9l5YJdC3GuXrx5Sfb1Gxfu26Z7H461O9hvT8B5Df9d/Pfl3cmKIyPOaLbx2sdAN4ftLLgw4aSdMePUHNmz6tCff4ouTdqo/+m6me+ydi00N9HdKWHPlp2zs4PaAohDdWCwwOICypk799tcwPLov2smDw4TO/W80g1+Bx9e/Hjy5c2fR59e/Xr27d2/hx9f/nz69e3fx5+fPPe10vn/9O+56QT8D0DoujuwvwTDCjA6Bh6EMEIJJ/wLAQsvxDBDDRFoDTcPI2uogA1H1DBEEk+0sLMEBmCxRRdfhJEBgxbYaoEOQextqhyrEgABGqGysccfmwpStRVhRBJJBWyzbLEbndzxquxgsq4mFZPE0sUlUbsNxw+hxK2y0cSMzcgsz9xyrCcDizKrMNWckqUrz8QyzdS8xBNMHMkEjk/h2vToSDqVZHK0L//ZPNSxOFtaVKM5B4XRzi71pBTRPeH000pAJxIUUi0LLTNPSytVNFMqG41pU4k69ZRFSZscNVZF39TMVDlVjYjVVl81VNRZfeUMVZ2EPcrMVj/lEtZfSQ2WVsrgNPZYV0EFLtFmgQWNWIWqPDVaaXkNldlsrc3WVkbNdRRXiHT1FNxqsX1NXcacHQzdVL091t0/yY2XX93sHRbgYiNjF1J9NfWXt4R901aphqOS16GCBz24W3h1W1hHgbd9mKOIAZiYzopvzZjHkqXc2OGUIcZ3V2r3vVjhmBle2eOaC/o4ZDRfRnhmHU/O6uavhFYxAaOPRjpppU0w6LimtiMw6ur/Bpz6ZuysXtBA/bbmumuvvwY7bLHHJrvs9grkqUEFqUaQ7bXRnkntt6VuG+6URESRxOUMyvvENZeVtVm8+y4RrMEJx7CzIZNagEMBFjeqSJ9NnlxKH2vkG/KcJF/sNGgrdxN0tDrGmeiPPb9T3H5FV8l001VDfdLAx2XdLdc/77w43GdfXfV/Md09r9iVvdZ3maG8PXW/hu/VeIaBHj152YXXXXnAry+eTemJD4v5cHnHuPaIuL319cuqnz577GlHHnjr1/L+Xed/Fp+v7ZtfHn3uewf/eO3dT1/39Ie/9fGvgL+rVfDgN8DvHdB/DqQZAPfHk/jBbH6UuyDKJEjA/wVuJTEcZF//nnepBL5PgB7kmcUyGLoVRm+DDewgVD4IwxBCkH7tK2EAf1LBnrVQLfVjzP1ouEMGyk+ENzwij4RoxLw4rSk3caJRoDY1u5lEbgzKWtrIp5GreWWKbTNbGMU4RjKW0YxnRCN+qjiSK2rRbVh8o9bWKJI2unGOH6lj3LIYN+i5pY9+BCJkArka1QTyb+pDZA0TacBCKpKRjnxkJMPnQ0A20oZS+uP4PnZISE7Skp0cISU1mUm+kHJen1zkA1OpSlCGMomYRGUrMfhKFtJydKYUZCw9uUpXXhKWoizlIHHJSUmyspi95KUuRwnMUzIzl7b8oTML+Rhi7v8ymbP0ZS2ViU1ZcrObv4RmJcOJSmpu0py4JKQ0OzPMcw6ymsa05jZvqc520hOd6zRkPccZTHsKM5/31Gc25ylNdgLUoO78WEERetB+ytOb8TwmEgUazX0206Ha/CZGMzrQij5zohK5o0fyaMU93q2kJI2jHek2NyoKKI0vhWlMZTpTmtZUPCGdyEjZeNKdCsAAPwVqUIU61J8ahKhHPapBFLBSOOIUpDylI+I0tLeItcYADcBqVrW6Va42wKhIBStQXQKACZXVrO9EZlg0V5PGlRM3DYBTWOVqkBJIq0UJQKtEY6iQDAggAzZxK47gqhm5htUgELAri/Aa0GuKZCH/GohABL4TWQko5CYIHWxCMBBZzlKWY4UF62FhlIAOWMC0p7VABV60WIbmjyAPOEBsvxPbA0zAKYGFUmYHIgHa9ja2UfwqaIkq2heBwLe97RRrF3o+gnCAtrONbQRuW1Vz6lYgx+0tBhwm3KQKALEuSgB2adsB8Ob1oRQkCG+fa5DePoAgl6WndTUgXtra1mPcHa53X9QB+sZ2BHc1LzipNxDfQpe278Utm3Q7gf7GVgQQw+9QicuiDTT4ABYAMGM3KpaBYKDA7O1tZQUC3456RrcRsPABRPyVCAt1wgPwQIo3oNgAa5SIAWDwhwVw3OOQ+KOazCxsU3yA+7ZYrPpl/1EFhuyBFSm3oa5Vb28NTFsR+7ixWcmsh317AQpc4Lj2Da6RRRuCD4yny9idsZNLTJvr+pYCAqCAbzkwECtv2C2ZdcBx3xzn3koXZ0Y+MmLLTB4+95a8A1Dzj/np2gBEObZc9nJ76ZxgRenW0QeYcm2LDOgxDzo8Z/btkRJ9ZY4OWCAiaLCfR0zpYFlXx73VAIQBbYAXh7fBIMhwa02N4wY7sc4R5ZF1HT3l7c76xQMwLn2ZnOvl5o4gWsZurBFM3bcqBMWyBbGKi81pJN81xuJVLbOfvOsA5Fm8lmV1tqzLa2zv+ABz3raYu92iCmMXw+XVMLAdq5AJPMDf/36AfXSnjdmF/Ps7/o4ii409b3pXwOEPrwCrRm3nZTpbhumO17ohlqx4t/jYrZq4vm2MXhRSW7Dp4risuf1du4Ycoi+fyWkIbpG+/nVYs6Y1w0Fe41KvJeE5uYlTIaLTnmr8OmH2uABIoDSmN32xQh86VPEodZEGBAAh+QQACgAAACyYAAUA3ADtAYX+/v5HPIoAAAAAgIDQzdzp6On9AADb2efI2dsAenpKQIk8OkjP5eU4ODhFNn7CEyx5K2SMJVZlMnMsAABQVlazGDiqAAAsaGgwKFyWMzNsSEinHEHSDiHNGBiqKSnaCxsAVVW70tIAGhoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAI/wABCBxIsKBBgQIEHFx4MCHDhwQdQoQocSLDihYbKsy4ECPHiAIIBBhJUoKBkyhPRiAZ4KNGlwU9wpTpkuZHmxxxZtRpMaFIlgEepBzKEmbMjUYB8Jy4lCLSpE0fRr341KhPoBCGDt1AMunAqS+9Kq06k2xNszfR5lS7MyRQrVoljBQ7VizYsF7vGtR7lC5fkD9HRoA79MFcu2yZJlbsd7HTxpARBw5AWCuElogza87rWGpnqpE5B65QeegHzJw3Q/3ckTXe1aFXBxaa0sIEC3BRw06t2qrrvb/79i4b2GTKCQImaF0Zu2xz58PPPj87eUPKhAKGcjjMe3d37+B9T/9PO5kySuxD5eoW/519e+jvpUsGOvgkepQVukZfO57/fv/xkQdUAB/Yl1BKDugXYFv9Mfifgwu2VV4AWRlwnwErKRiefBH2FBxIDXrY2IQkYTcgdxumFSJjD4rYomIkjmTiiSt6VqONLz6Wo2cxBjDjgDe2FqSQO4JWZGs9YiAABicGUIBXBST0pFFRCjAllVJCmWVSVV4JU5daWhkmAgo0aaaM2KWp5ppstunmm3DGKeecdNZp55145qnnnnz26eefgAYqKJsIEGDooYgmmugBVRaq6KOKIrAlllYeAOmlh0paKaaYauqlS10ygMCopJZq6qlSWsoppJ4mVeaZZir/cEBCr8J6ogJ/fSXAAbXaChSuu/bqK0nANsbAAMgmq+yyzCaQEK/DDlhsUtGeSEBC1Q6YK0JuZcvSVd6W+KGuCDBr7rnOdhuuj+OuO9K1ArjL7ojygrvutmOVe+6+yabbo6+5ygtvveNy+6+t9oaLb0L68ruvvwR7JTC27i6srsIXe2txww6bC3HF7bo7MMj0gnwwrBt3/HDCGoe87sj3FjzWyWeynG3KKntsc7UBi0xxzCXHTLOZOOe87MdAU+tzvCTPZ3LEiHFsNLJIYyzx0lCLRvDQTRY9NdU7R9vzyz9b7bTQWa8m9dRVt3w12UwnrfXTTXO2ttFt3+xyuDCb/z032nWr/bWyefO8t7d9u302xlyf6PXXhYt9eLaJ6x0042n7dnfOkQ87Nt9lK/435oFrPni/YXs+ebWVG355y41rKzPDp4OdseVKw505cVvvftbmKncO8OrRti7563rHDtTjbKc+/Nugx+23bL2XXhbwHQuPMPHDGq868oYr/+3sAmDvsPYoc2+rApoS/OlHjQprOJkgv89Rownkr//+/PefLv0xs5+HBkXAAhrwgAhMoAIXyMAGOjBOjlqVohjFMAk+qlVcSpUFI6XBDSIKg5QqgKo8aChPjZCEIHSJ/IYlK1qta1qcgVa4gCVDb8FwNTXMFg1X6KsbumRi0hMd9f/oJrchAq6IP8Sa9ahTPSTyjojTS6LuliggKArRN+ITVxbRBD3EhQ53o4Od76p4xCh+BIhjXMsW2bVGi7Xxdq7LXfTSKKEmmpGJVgQjTNBIRTXa8YpPLCMgOcJHJ+JRkHrE4h8TeUYlGpKMpHukH/MYR6MU8o6QFGMf60jJ48nRi0Fk5CEjiclJIrKSe3RkKTl5Sk+GMXl0hMglB5lJWG7SQ298Yxcp90VUKrKT31ucJiU5kVmKspbhiyWMFunLRk6RmLhkpiuNSEpaWsSYzRzlMFcZTWA+75O8DGU2kSk5XQrTltB8CDan+ctWBvOVybylOlVpTVZW85imvOc4r/n/wvbVb0w8HB4AMSZAi8QPZANtWUGd8sCGOvShEI2oRCdK0T5FkIQULB8JMzWpL3VwoybcaAk7CiopiepUKEXpR1FIUo4EFFYtFMBLz+TDsuSwWjt8ocWOVbt03TRaNSUkPfFpz23W00Pm4xf6aqY+W3nvm/AspzKdktSVwZGdzpyjPJ2Sy6l6pqroct72dsm6XmJVm+jkpmLAqrOrvjOVz1QrV6X5VtP1VKzpI2vxzFrXQOrzrGlha7PwylS9do+vUKWmUYmK1NoNYKlEayqsnjrWc8YznVR1LGS7JtkzUTavlpXqVr+qWcJG1rC++mxhQ6s6c9qttG5NrBS1ilke/9FVtteDrWvhSlu52tablX3tXWMbXN6C0qtU6epoqSLYo5mWs6h1KmKL286/9vV3ukWuQdZ5XXK2VrsaaS7hnuu4zppJtadl7fB2a9fTbba80Z3sdEEb1e8utzXiRR1x6WvccIIXOMqtLXOze9+CcBe3aL2sbwc8XPZm9bgFBvBtqZvbBv93IO5i33yZulCIHDRmCdVbhx+CP/+ZuH8MmymHV1PRFrv4xTCOsYxnzLCU2phUDGjUjXfc0vut1IMhFWkKS7opIf94g0PmSAIcy4CELNm9FvspC5+l4iYFNS1S7iGVdTquJ59OU16GnBsnzN/qLnafEwnz18A8XPCJVv/ASCLzao2i5qmxGcrkC/CC4wzcMn+kzka78+DS5Wb7wlkjej7qQwCdM0GLOc9yTm9913thgTBaZY5uHqT7PGfFplXRDLl0xzKNtzFzWtKeVjCoFyJqh5Gac6Z2J4K9S+kID6TV/Hp18GJt3Vnn88yAtQiu96Xr7PEa2N399acZu2jHFvt8x142mrspawor2dlObrN6t+fgBL95zwUZ9rmerdRoq5rZcz01dJMibnOR26qFrvWhJaxu+LIb2wJot3M3XW0/e9vQ4CaIvpf17rDyu9fWpjW3Kw2AgSur4G2N98JtDZhIr5vO+Ha47SSevm4rvOMM1ziyID7YgyPb10X/lXaw05xxbU964vM+SqLRzeoT2zx/YL65zXuckQ9jLMSGGzFDfN4yoEtO6BqhsdKXzvSmO/3pabqoBzMqdSTz3KBHtmCQQXr1iYRqxzfOugSTnJEq32pWMuVyY7K8vi3PcKewZTtMPwfhmFe83rJrTH5t53F+xnXVybW4vYWL574Xc6jTXibel0e+vT+WvHkHZ1nFuXJqI9zfgSWw3QFw4IQr+9yJz6yFKc55xFde8f3udHsHDXnGx9ezG0a1mVWe7LY4/r2Rt6Tpa2/5k3ve9poPOIZ3j/Le0774aw0+4LdL/N8bH/SnF33hGd55zH/+28sPr/JpfpDqq96vvrc+//BHv3nvyx78x3d+8skvfIGY/+Kpxj738bv90C/k/YOPP8CzD5zbt358r3desQd/swd9vLd+00d6+Jd7+idv7Ucu9Rd999d84vd88md/DJaA5UeB3/dvDsh/R+F/+9WBGbGArrdtIEd63CKChidL/TSA5YV0B0F0emN0qiODBlFiOmdiKfZPvgF1QBiEQjiERAgoVWdBVCdSBEB2PSd2q7J1LCUmGVRkXEeFUYiDBGF20oJ2WvgrUdaFLJFTb0c+ckdTbmdDdOdfKjgzgseABXiBEth9HHh+HghzD2gwbXiCuvd384doeQiAKMhULThPfIiBfJZ6dPhxgkh9c0iA6P9ngMiXbojoiEJViHF4iJdHgte3f31IECYIiC+XgpuHh4sHiv01eQxHipOYf2/IiYZoYI3Iio8IhwcoiZmYiH7XWyB4d6vohrPoipcIi5ZYi7/Vi3rYgHa
4i6U3jJFYjLdIiYoYWYPIEJ8oLhy3iGv4f1rEiMyofrYYfpqYcpDojRPYjRWIes8oi3Uoig9YjVwUiNKYimxYitYoeXtFecQYePT4jqGIjRtojuFogcCYj5gIjrh4eAB5kAL5gZ0oc39Yj3uoiw3Ji+noi+voj+34gvg4PFhIEDQ4P2A4Ph05EB8pOTbIkSxWhCq5kizZkkA4kgLRKErIhFhnhUDmhJz/QpNeh5OdwpOXopN04RkhORJXthZlGCtnqENfyGVHaWX4whnyqI1oMo3OaJDQGJSKEZUjGI/ZuJWcRZVYeRRaOXOvSG/GaIph2RZj+ZD8iIzsqIyNsZb7OC/w+JXyCJVdSZbBaJYVeYxp6SFyeZYQ6ZYYCZeIEZh9iZa/yJBlGZYwaJHRaJd5yZZ0+ZdqOZlzaW602IyWORaIaZXqGJnlBZaW+Zh+uZjJOJG6opcE+ZemqZgXyZWjOI+C2Zad6RmfmX7n+I26GZC3yS25OY67WZW9qZC/6ZmYWZuVSZiyeYeHmZyJOZityJh76ZgbSY76qJyaOZCcWZrXOZzZGZ22/4mab6macQmdoAmZm0idremRYeI+AIVQQ4kmMAkAJak6J7k9MOmS/Nmf/vmfD3WEEpSERiaFIXRCN2klJwV2KmWgHmWTVgehWtd1DzGfMWWhSzmGPIVna4ehwaJ29nhY3+mbvDk/sHWNzamM7ric07k9IoiikvmPEtmYFFl0J1qXo8mNM1qdNVqDN9qPKaqaK7qdAvWjzBmjGZmQVymOJmmkLVqeNLqMO9qeDolQTkqehSmksaieTIqfVxqbSKqiW3qaYFpeL4qjskOawjil3emHVupyR5qjCjimsCmasnOmQBqmWqqkobmeLvqldsp4auqJdCqdWBpZeBqnaaqjdf/nnFLJLon6pFkapUNqcj4Kp5IapJRaqONZpncKqH4KpTwqpY1qmI9KO5h6qHq6qXzKpQv5p6nqqYLKqGo4m6daPqDapezZpoTaqmQaqOMTqaoqpzKqQ/4UQPEJYjt4YhRKYj0IYvPJLvsJoNRardZ6rXhSn2ASQmGirc06dN+6ENvKrVNYn0cCHEPyGu6RIipyrmm5LRaTrujqrsLRIccJr+Qjr/XKrgDCr8fJLfQKIgGrK/oqsPb6m/hasAQ7sADLsHXxr0biry5ysDhCsRErsfdaMPHqsBtrsUQCserKIRirIx4bsiK7riBrsCgLHyNbsS17sSubsg/7sh9Lsyb/264lO68yq7IsG7M4a7M6C7T7urMz67P9arQQIrQ827M7m7Acm69Pq7BS27FKu7A5O7RMK7NOe7VLe7JZ+7NI+69bW7UNy7VWS7ZFS7RjG7YsYrZli7ZPSbZUy7YkC7dQS7Rv+7VHq7dJS7cu67edubZ8O7F2K7Vp27Qae7eFG7UD660OCj/hOoORm4OTWxDj+qCOm7kpia2c27meC6ACuioEWoWaW67d2q0IGqEiNJOVOxAWyoUg6raH67Vn0ZS3kpQ4lYao2JWGS6Spyapsip01C7jDO6yLOqe+Wqd7S7tga7yzirzBC57Fy7zLC6zbCL2lap6z27zUq6u/O6qV/9q7ipupqwq+nMqig9u2dsuavDp8yWuo3Uu4xOum2mleTYJeSyq/6Vu35EusSRq9JPq3+yvAsjo+g+q+AGyc/DvAMOu8Bkyru2ur48vABVmc+UuICXzBCxy/6tu/x1usteqoE8zBJbqZwrum2RuleUvBNxuqk2q+79up3DvD1evCmgrDGdynfcvCQevBzwvCESzC4guj/iumMYy+JEzANOy9okql7ne+c8vDWFvA1wvELHSsBHW6prvFlJKf6ePFK/aDnzvGZFzGQBi6nDK6V6jF5NrGmJu6E/q4kCvHZfdCsDuGQ/y0tistUVyJOeyqHSzFXWu9MlzDCPnHv2rI+v+bxOFpqvZrLa8Jv0u8yJOMjo4cgPcbyYW8w4w8vYSMxJVMjVA8wqGsxDaswaZ8yCk8qivcyS3MxAHcwH68yk58tvPbww4syYqMwbTcvoO8yxv8yX1cgqOcx7J7qwobvox7zF6pw5Sci718wrjsytMszKQMzSF8ycusqInMycR8xMMMzKn8qtL7yi4IztfszeLcyNqru/eolcbMzcqrzqqcze2czs8Mywo8zrxszyq8vevsyafszIE8y/7MygBNz/lMzrEs0PUcxNrMzEQMyMGMzRB9z/Hsw/O80P0MVFisUGyMuSFNZGAcWaVbFmac0iq90i2Gxpiixgl60iLNxW//zLoSOnatKxCv60J4vM23fBR77IUf2tMRGc3lXM0Bbc6WbMEELcronNGCvJqU6c4iCs8+nctTCcHvzLtXTcVZjb0HXcutXMqybM0H/MRP3dVkXcHC2dAoHNa+bMtRPdYD/cH/a9RujdQKXdBmrdVVzdUSjaY/fNdwLc1TvNZKXcLcadhojcgbzddJrdcMfcO1rMyBzczsy9ikWthHfdiR7dn6XL6VXcxq/dk92tb7LIdpfdnrO9WPPCD429T8XNF7rdi7qtmWvbiY7dqYDMkjmtoOzdG0PdmiHde5/dO/XNvEidqofBAJAjfP7S0OkNMxSd32ad2XS2RbNN3NDBTc/z2tLB3e4j3eCSTTc2ze5z3S6U3TRAaTB/De8B3f8j3fT/Is833f+B3Oyl3W/CFC+P3f8O0Q/g3g/50rO512RK3bdhvUYYi7QEXVqaXJoGzack3NVVq/vQ3bEq7fwj3by23Cnd3YeA3cie3h/B3aRbynjq3L+x3ckI3idm3EK77JHX7iNc7WIJ7XCDzize3iw/3ixJ3iwMvjsm3jQH7ktv29o73aCo7cUp2Zrw0UsU3RJu7jVU6/4jnhD73VEgzVXs1Gfh3hv93jJW7kP57kTWzcpM3aTq6KWQ7h0jXmRW7lZn7lfJme3XzOM67lN17mdI7jix3im33R/8zhSP7hgf+u4yLO2Ype4Ygt2UvN3HP+1oSO0IZ+5oh+24J+3HOd0H1+50xN5aq955du56Ad5DGumtENSqteLd+t3j5Gx00o6zW53VL56mJM3rq+67x+J+g9678O7LAu7Oy93ngruBae3J/eFwNO4Pct4M5e4DLjtqVe59beGgxOLA4+ZdMut/h86NeO5Xj+2AibuF7+6Bf+5t3e5tX+534u7qGe5xlb2i3+7qAu6aLunfS+7Moe6TlO4u9q7vsO7oCu6Y0+td+O6eF+7/9O5gE/8Arv7gyf6ABvnRBv6v2O5i8s1gzb7vZ+6v5O8Q5v8Wze6b6b5pqN8OdO4W4+7iwutgJf8sn//uQYjrd0ze+OzvKeHvIGX/FYiezonvEYT/PqbvM3T/AfL/QLn+4uT+PzLvNBn/MwPtiGSe0JP/RHz/NKHtcqf/FLf9oNP+nuWezE7sbtPey17i7cbeu0zlC9/vZwH/fYEew72fYeht14b/fOqvfgyvfiSt3Rjt/1vSuB/+xX//VSLyLNXvjvDe2MH98Gbsc8jYaHL/Eg7xTZTpTbrmVRzhJTLu8Rn/SJr/Uov+lr3uQm391Ur+JEnu+in/WhP/E9P/
KLXukc7/GQrvGUreZM3uY7H/tMH+/kXo6kXvmv//uZvvW4ffq+j/vBj++gT/ytH/2ID/tYT5tFH6JibtVQ/1/Xqz/kjO7zow/8Sl/wym/6vZ/64pvZ6F/8K1/vsn/+jc7pM2/91Y/9Tc/nHc3lQuz15l/6ABFA4ECCAgQAQJhQ4UKGBB0+hEjAIESKEA0yxJgR4UWNHRNy9NgRZMiMBglURDnQZMqUI0kqZIlSooCYFV2+VHgT58aDOxnq3LmypkUBJ4c+BEry6MOZSx0mfQmVpNSQVD0KdSoQa1arGbMObPo1QFeNZEv29JkTbVqeRr9udWp2odgAYd+uZSv3J960ehfCXQr4qF+EDsTONJzVgcECbBEWYOwYAGQBjR1TtsyWsluni4uK9ZzZp0HSpU2fRp1a9WrWrV2/hh1b9v9s2rVt38adW/du3r19nxbtE/PlyMQrSx5uPPjO5JqLOz/OlsF06tWtX+d5QPt27t29HyD8ke/o8UHL47xY4Pt67+nZv9cePsEA+vXt38fPwKCCrwrC85QMwAD/A8CgA/jLyj8BDuxPPvwehBCBicQisMLzorpwqs8o3PAuyeaDMMT6JKSJwwALzLCqFK9aUaQOuXoxrhYxAlHEEEmky8ITdRwwxsB8HGxGhmq08UEcTexxRyExspAzGZ388cMib5zQwySvlKzJHIEcysEpjawSRiXHxNIxwbrksiYvv7zvSCuzXHIvMuGEMsg60ZSSzTbDlHFOM+P8C1C17lQzzZj/1tSTPjfFLDMvQcVTktBDDWUJ0UQX7bPRvh4VkM4tJa30USIv5TNKTcnz01FQW6KU1TwTVbTUIFPdlFbyVkXpzEJfhRVTU+G01bxIP92SV1JLfPPPYNHjFEVcbWo1V1FhHVFWPE8VFltmn6VI10mN1dPXWbXFcFkMuSUKXaSmpXYAca8FllwNh+VQ3afYpfbdXeVV0VwN7S0oWmjBZVPfb/ll0V8VAVZJ4G5FTSBiiSemuOIQDEqss+fSao7jjYX7mDlvU/KM4QBC+/M3lVdmuWWXX4Y5Zplnprm15XDqWOTooLv5pZx17pmkn3EOmeidfYLvvcYMTJo9HuOFWln1/5r+zj2q23sUQacUAE8ArZdSUOGEEXaRwQQN/PqosB2jyy5Go3ZUbBdNHsthoiRr29p94a6V7LLsXpfu8PJG9m1l/T6L3rsEf5TwYhFnslkt632cLceR5BtVyPdinHG8D9P74MyzHX1bYjFP6/Jk4948UMVh9Jxt0AvPtPRyWx/09NV3Ut1w1m2ft8fOK099duJ/Pxz4hXX3nXfjUUc++r49pRx6579yu/bkt5f+Vua196l38KfnnnxVv/+1eOxDD1Xuv90/a3jrcRI//e5JL9/76nen/3n+8b8fs173pONdLyvZs5/5FKg56i2ugP1bH+0SyMD8AXBT8vufUvzXPP8KBvB2DYTdA1+SMacg5isoUw5yiuazFQptZCgpGWhayKKa1dCGN8RhDnW4Qx7mJmghGZoLjwayIQJNhUU02g89EkQhKlEj14FiFDvlweApb2xWdBH8FjIqPemHU0+roABxB6kxcrE+IBABCKj0Rcm1UYtyGuMUfcJFDWQgA6SxowbwQ6I4ouiNrusjGOd4nwsYwJCkMaQBLrAnNv5RLY4kIxbf5xgiJaADiUSkIT3AyEC6sZOQlONOiKSBRBogk4akgH34KMnEfdKVrNxifRJQSlMapJQdqNEqw/jBXVaxl/0qY30yQMtTGlKPsWrkK395xWVmkZL0KSQxbUlLEOn/korAhCUcswnIbSIERKSUpgBoaYBUuus/glygBTuYTjGyBUSzpOUEBDABWuISmcq8JjPz6cxuAqBGFKClBSZgAVqW05zJ7Gco17lQdTaUJDWy5DhpuclqIbSZk0yoH/EpSvtEU6KGXGRFN8rQdl60lf0clQc+aoAMcDKj6CQpL/eJ0bSMCp4SHZU12SnTnfpypic1aSwJSQGiFpUCIVXlOT350qUGVZs1bddBR+pQnsbUpz3tiBm/pFOrYtOp3PzqI4PZLq5S9apd1SdW04rWjFTMrW9NwMWQ2MQjOlEjTATiDJeo147gda9zVVEPBTtYwhbWsIdFLF/vqliM+LWv/wZpQGQlO1nKVjaykLVsZi1rEATU1bMpPNFPn+qoBizAtKdFbWpVuwDIrta1qzXJVEsaWrOu1TwL2BJu6yXbqtLWq2p1kW7vIlzY8fasvrVtb5lF3IFU4AHPhe4DIrAu5grEudF97nTvRQDj/ha53q0ti6obgZWSsG7jXWngupvc70auk9VdqQEqcC/4rnS+AeMuU0EZRwsxdwPxNYAEAsbcCgBYwA3Lb1gjqWBQ9pcgEgCwATgw4AdH+AH4XS8/2wve2W6KuA+IsAG0e16CgDjCEEBwhmm6YQ0D92/ChUCIDdkwGMvYACnWbx8T6mDrjlOgBKXlgQ2CWwykZqASFfZyUVQMVBavmK0vHogDxilPepbywloRAG5XU+VS3rduCRbtaF3cXqUqibjkLWUxA0zjABQZNUemZcZim+OM7riNzA1nKTdAYYJEeM84ZrBG68xgHg8EzYec5oz5bGgAb3fJ7m0yezm83IeYuJbiNOSIsVxdDqxU019+tJgjDdYxR666EEY0pj9A3YegepwTdjSdA21nM0MEArcmza3NS+KH3NrXvt71nAMN01EvuNR7qW7AJMjm3co6zL4tc4+S3bBlb/pToSZ1scU67CyjpMgYgNa0CwXmY2f7yQyxa0Ycq27GNnbIMuz2CYXtMcDmtd5/TTdDAgIAIfkEAAoAAAAsmAABAN4A8QGF/v7+RzyKAAAAAICA0M3c6ejp/QAA29nnyNnbAHp6SkCJPDpIz+XlODg4RjZ+wxMseitjiyVXLAAAZjFysxg4UVZWmDIyqgAALGhoMChdbEhIpR1C0Q4hqikpzBgYABcXAFVVu9LS1g0dAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8ACwAYSLCgwYMFBAgQeLAhQoUMHUocmHDhxIsVI15smHGjxI4eOUIMKdIiSYMaTxJUqNIgy5YFX8IcKHNmTZg3W+ZUuXMmTgE+AfQkOTRkUY9HNya9uHRi06BOgfp86pBqQ6sHsbqUOpWrTa9QfwbVGhOsTrM80Z4ku1ItUbdhkcJVOpcuVLY06zLVG/Uu37hX/wb2S3is4MGGCwM+m7hxV8VfIYtdzPix48iXJ1veTPlt5sqcNWMODbqz0cNbJZcWzXq1a9N9SadVPftzbdmwq6IuS3vtbt62fffOnfV33uGnkctVbpd489HQW9+O/lq4873BPWdPvn159+t48Qr/NX78O3bzsa9LDE9+PPPzuLXHV+9+vnf7z6lPl06/vH7r+MH3n3wD0sfee+kVeJ+C+fUHHIMC8keghNwFOFFKJ4HUkoYbjjQThyqBmKGHMIlIkokhVaTQiiy26OKLMMYo4
4w01mjjjTjmqOOOPPbo449ABinkkEQWCSMCBCSp5JJMMnlARUg2KWWTCJDY4UIHTKmlklViueWWXWJ4IkQMIGDmmWimqSZEWX45ZZhtukmlAgHUaeedeOKpwAEK0Znnn3kqIJ5CB/gJ6KEBCCpAoYgiqqhfDAwg6aSUVmppAoQa2migmW4KqKae4kmAQqECOqgABJT6p0KpqornqQhY/yrrrJii6uqrtt6qa56jCrBrnae2uiurvwYA66zIUlqrsLoSW+yvvRYbrLS5DtueQrEmm+yy1DJb5wQGhCtuuBE8Kyqpv06bbrXNXitAttrS6qy13gbwwLj4mntntOv6Va+q87brF7zxXhrwrQdHgC++FOhbJ7/W+tsttQMXLC+7CGO88MITOAyxwIb9W+rBrh5rscEYl8yuwhuP+4DH6EYc8sT9GkbwyZJyu66wDrS8MAT6fpyxxDtTbDPOlepMr50U+Lxw0DGD3JXIoZIMsLs346x0u8LeO+4FElywMdS+1jw1zTJ3lfXJW2fMM74SCCDBwuWaK7TKRC9t9ldrW//ctsrMbjDuivhyAHPZaX9FtadWj4w10so2XrXG4hI+bsdkG3120Xv/1HfBf1/tLcsGWB5uw4drrjjaUvMN+aShO16vCOGaboDDD0c99MycJ+756zlLznjK4JaukLh1p975WYtvKrzzjwMf++RUr4j7uYi3/lPzjT7fffSvTz989brjfvfVeXOt+u/Se4+o5NZfb+f5jqfv9vpnfR6v+NBTnYEAGZBfnRTQJWqJKUXYAtXVEKBAxx3QI1BKgAQnSMEKWrBWDFxXBq1lpA568IMgDKEIR0jCEprwhDiKkpycBKUVUslKIWKTC5kEpxkuKUw+yUicbFhDGyYJhx9qoKf/9tSnXT3KMIzSlaKSeKsjdoWJrlqiEDflxJkUi36Tsx/g8JcW7r0vZegLyhXLF8bN6c13zGPd7nwyxuytcXW9014a4/hGmLSRi77x4qHct0d36dFUYKyfGKFFRkGaUX3L66Ia8TbIXWFxeFoUHR7f8sdVBTKLjdTVI6EXSdlN8jSVzBMfAQmVOyYyj4ssIxzPKEeVmBKNiqQjIw95v1OS5JWtRKUsVbm9VBrSioR04yxXiUhY6pKVdWwJLpMZS2QOs5e7/KUdg/lJuYQSV9e8k7qcyUtlUtOWoPQlJmm5RXB6ZJnPnCM3panOYubylt80JiXFCUnerXOcwHRkIfEJ/8171pOckjTnRtDZzWa6k5nHPGg6XRnPd4YzmvxsZy3lGRKCstOgE3WoNenJyUxGsYDreuBGoDRF6G2wXSLFSAI1WNLvpfRCKIypTGdK05ra9KY4NZIKbfgkbPnwhzAckZd+2kMfArFEZFKTUpcqQ6IG9UQtPRQRBRDVT50KiqqSohFPFan2Laqqf6riNPUpzIImNKMIfYv+tMW/75WyoWl9qD87qjbgDaCtX3wrWatJl2zaaZSWrJhX/Qqs9lg0ohgtJ0XlstZtAVaUhoXrQs+qWI3SpbHIwmsf9arJff6TmGidrFrtqllSetRVm3SrPRVq1tEOlq8XOexn+8nai//6BrMXI6yxIrtXge5Ft4/FlWDDF1xt8razZbXtPCE62/yRtrh/Pe6tUpvX1Ya2tafBraxKG9jTqoq6m7VuZePK2OdesrkM7e1i+8pR1db1tb6diGzpCtrxija75gWudFHrWfrS9rrKxS981xtbyWJ3o8z1r3MHbNkCq7fBUdGvfocLOe5ClrPT7a976xtQAkdFuyibsHdLBV7TAtST8V3Pu/ILW/kaOMAInuuG2Ufc8yo4vU0EqbVe+pGVWuukGeOxQyJ4wSJb0McozWFOl8zkJjv5yVCO8pGWSuUzMQBKVc7yUxE4VKM21csmQWqXefhlMod5QwmwKwMUkuYa+wX/q6XSqhKvClZOfXWr7Wnz67qkZ6TVqpPUa7GKJSxoh/QZaXz2KqDHV+jAEDrFhrZrot0s3g5DeNDtra5PDo2zSVd4m7VFLGUtTd6NcPpknvYzqAEs6uXKWNMzObXFUq21Vdv3wOxN8IxbIuuC0Zpttib1fWMcavSepNfx+rXfgo1iD2Na17CGCbK1pWzQMTvQkHZ0psO7aUmzWdGVbvaldfNoZ0caeNXe37UZne3ilHvcDZl2stLN1nX3r9Hu3raJY+1tAcj7You+d7u38u5SX+Tfs6K3Y/2o7+6eGNvmjne/EY6ygH9PxA9nd8QPQnFLKTyz9r44vgne8AsHpeOV//o4wMMNcXjnG9rc5je6v03pjAt84ySH+b6lPXFw21zkAy9LwYdt6p7XnMPiNniESy7ckxv56RLkM9SfvmUIIjljQFaZkEuS9QXWGbJbf4iUx072spv97GiP0U5n2NN3/ZQAR71SAXY4w6KaOewoSWqWqVzmuld9pF+/01QDbyexngXOoZJzE7lqXsQPUTzz3XVihY3rvYA4adAtLIb5m9xWy7XYN74ti4NOkMhHW6K3hvFlR4/zgpg+5v9NvefLy2ClS+T1O0c95VVveda73CC4dzjSW2573Vw+cjaW/EmCb/KffxHjrqs90R2M3JELnenGpbCqk396HGe488Z2Nf/ole/aGkN/rNUnfV6GXvkP+774DWF+053fx/Mv2PzWL/2LZ5/rV8P+/p/Gff/nfZyXf+uHfdGlfbUmgLlHgN+lYd03akk3fe4nfe13e/sXfp/HahpIe/infgAgf9nHchr3e2VxfLCTebu1eQ8IfqEnfhz4ggL2ga2nfw8Gfy/nfw0oehbIey52gxT4bDoofDQWgPbngCQGgQMogcQXhMb3fk4Yf0akY0n2IVenMl3nQEr2LlNHde9CeNGFdwWhImlXhmZ4hmiYhjr1dm23di4UdzE0Zn4nh2/4dypFhytkd3MohgRRAGCYKHxCVXj2Zn+oeFFEZ3jmeFQEeRkog8T/FoPkt4Gy14ED1YiR+IiT6Ij9N34RCE9AeIHkhoCaR4I3Z4I2mH41eIA6R4S6N4GgKIWf6INCyIlLCIOZeImVGIv891uiuIKkCHSpGIKW2ImSuHu7uHSr2Hz5hIqmqIpDqIyxZ4yUSH3fZ4DjwX6yGIrJOH/LWI0gqILgyHDbOIJsNIy1WIyumI3aNo4JOGKhUmKsOHnpeIyzCInEeE7muIO2KI2ayIvsOIrlqIvTiIzPyI3ROI8DiYEC2Y8ESYv6iI5N+IoHIYLtSH+AdIRMWII4CHz5GI8ZWYobeX3/6IsByYwh2RbYSI/aWJDk2I0F+I0MaEkYuY8IyZAKaZJR/5iDDumRNBmR6jiRU6iEgMSHFHGFXhdSW5iFk6OUw0OUAECGahiVUjmVVImGTtlCTnVmcTh3b6eHdaiVQsWVWSmWYHaVflGIiDhnd6aWhJiIaNkefmGN4TiTEKmROUkZQsmTdQmSdymSLFmRDlIWchmTkEWXmMiPuOgceQmNreiTKrmOfwmQgdkWg5mSCbmSO8mYgbmYBtmYdimROReZJDmZeVGZvRhyz2eNcQmTlmmTmGmP56genNmSB+mYlwmZmdmZm+mCidmQsPmQ
h1mTvZkbswmYtfmZPxmauUmbk1mcknmcfAmafrmcxtmcvHmPm/ibehmctumapuGcowmdwP/YjNd4mnBpGKY5kqhZf6qJnqxpnhaXmiC4msE4l+kpmuJBn+Rpn++pnufZFfdJnc/pmdGZnIsBnut5kQGqnZqZIVtoQEn5h7vFlNBzlUbpOBTqUltYlRzaoR76oTXlhivUhl1phzCFh3ISJmWyd0wFlmOCom7ilXlooh9RiIH4lm2pll11dD+hiI1iiFnFiAs5nK+JheYVn+wJghQ5oPK4lEf6i/IZjEsangT6PSgYPOKIn/vVggtqpD43fMj5mEA5pNjpjz/2pBYpk9Y4pQlqSVd6V21amGvakQ3ak8Pzpn8GpUkqpXSqmx9ppWgKpgUqphxJprGZnV7Ko1UapeT/yaZZ2i54GqfYNKeGCpyIukCBKp6MepID4ahICkiR+qgM6qeeiJPSiZLUEqqfqqZK2qfM2aR3mqmLuqeN6qrVCasmJau4Op6cKoyVup2XiqG6+qeb2pen6I31SZi4oqp6qqCt+qt1upeA+qWaSqu96qnN6qbDaqeDeptjaqoGiqoatK3SWqyn2qlBeZ19ZKFfqEFdaGQ0OmQXupQSGq8lAaL4mq/6uq8+wq5EiSJW56Jc5q9bSLBWKLAQhCC6obALix4qxrCIQZr+QSELQrENUh0TgrEGQh6nArHF4bGp4bCwcSAiG7EQkiAWGyHWWbIfy7IhayEP67IHyrHuArIP/3KyMQuzI0uzNtsWPTuxKYuyK6uzJhu0OYuzDUu0nUGySvuySFu0GluxQ/u0Ldu0N2u0SUu1eMmzMuuzXQu0UXuxu/m19WG1Xmu2YLsfU4u1UKu2bgsgWnu1axu2Kvu2GWu3FRK3gMG0epu2cMu2Vdu3YcG3gOu0hSu3eCu1Y4u2ZSu4HUu2+fm1j8u4k+u4/9m3lXu4Z2u5Pzu4XEu5NQu5oSuxjUu3Qpu4Yvu3pnu0JVKwB2uwYga7cue6r7uh/Hq7uJu7/CqickKiY0m7sQu8ckd3X0mWd5dDNlpEbCm6ndspaumjjiKk4EqoiKu6qGumAkqlSPiOCDq6nJutcv/6rNPrrYa7ullbrc7Kp9BKqnmruX7LrbxqrOi6vq9qvfZ7t8RqrfLrq+PrnYHrvqU7q+lbq/R7q/fbvuaLm6Nav6WKrPvpvQDMn+rbv0TatgesuPk7wNdqq0x6wamLv/BrruE6vxRcpqfrwXWbwaw6wQ7cq5mbwDq5wAbcwC+ZrM0rqrdowjfZwvv7wtfrmzl8qNRYww98w6savixMxC4MwTCsnDLcwTTMpTBpxOA7qeLLw+cawD/MugK8wgRcwkJ8wiCMwLsqwtR7rErcw0y8xUWKmDrsEL9CQN0ru1spvHGYoV+Ex+tqu7rbx378x2jIu27iu2Vpx2FpyC9qvHv/iMgpkryCuLyge8PQK1U+vHwcrL0oLMZkrMLRisFRnITqaqkpPMaeXK5hzMXb6ynw2MkfvMmkHKxn/L4VdcmV/MqjbMqirMn4WMBQ7Mq+XMrcSb7V+8ncG8rAesvA3MrB7L/lm8qbssrsm8zIrMywLMybi35YPMJanMmozMnRTM1DLMU2zLxVzMC/nItgnMvdbMu6XM3MPMyzzMuYzM7rfM7Yu8QsCMqDScVpysrTHM76PMXk3M/f/M8/mM7H3M4GXc/LXMH/603yXMv2rNBAnMXSm82xvM30fL5d7M8UvcNpbNFrzM1trM0XHdImPdIbrcBqnM/FvM8DLagO3cyW/xzRKj3RDO3OMw3Pu5xjc8zIAUvHQqXHQwnUIwXISJ3USh1lgvwlhHy8tRvVwfu6xDujMPolcJghjoyjMR3Ba7l4X32IWxrQ4xzJ5RxdhonOGG3NsizNH13S3bnTaCzORdzVHW3FSUzX+GzXZay/WYytZg26rSnXJLzW73zNTczTOm3GbM2/hk3YGo3THN3XGry/gP29gg2f7qjK3cvPljvYbwyLCO3ROb3Qk+3NxpXWAP3SAh3YMs3Yh13YKJ3REu3W9RjE6gzHtHzTtn3aIezX2nzZXn3DoH3Kum3Tnu3VxZ3bov3Yof3QbGzBv13Zf73byZ3Y4qqlLs3Zxkzavv8t2dKNy168wcjN1yTN0m5s3A3hAMHE3rriAPYqdkKdyFtIWPCtrHZy3+y61Pzd3/5tQvMd1EZ9hwEu4FI9ux9yAAq+4Aze4A4uEITi4BI+4bUNzm89GHM34Rq+4CyR4Ruu4eKx1YNo3itdHJNsVWEdpGPN2mWN2a8N3Bkt3Njd1hZ+2+nN3N/q3OoN3efN49M93pZt3SQO3jGM2wkN0nrd0kPe0LAN2TIe3T5O5E5s5N7d3LPd2BVu2ugtnM9dqKNd0Bce5b295XHd5a4n5K6t3JpdkjqO4zRd4m8u3kj8xW1+5N895mK+2DDe2E/e43GO51N+43Zu5Uku0tdN2UD/Xt3lneYzXp7+ud3P3NlLXuNwHaZ8juYuruaPvtmR3t1gXtph/ud6Tt3BjenDLbrLPeg5fuWxHdmArtj3TOWf7hDujVy17ir6PeAnWuBHjd91ct/2Hd9599/EXuzGHiS8vuu63mMI2+vJruwHHiI/m+WhDut94eEfLuEdnu0gfrnuS+2gHu66ceJh5bxgTbo0Xu2IDeWiHuuCXuVL+7mZ3ugSTJ7TztuUHt5Mvuetfu+H7ufT+cTzvLGTruV57u5cvuNbW/Dqnu4VnfBuPrMML+76PuqJrs3+PvF3bvFz3qsZz+jsHvCybs6yKe+nntmbTrqES+/4jvBlrvASD/IA/7/uqI3W7Ym24L7xBh/oEK/qCy/zcG7tNt7z8P7z8x7yNP/jHb+/H3/0M++M2Ru5ON/yDe/qHI/X9i65VE/xB//wLx/xe2vyLE/ca/4hy14Sz87saS+vvh4AwP4ruX6wxz73dF/3MLL28n32aK/3eR/th5zg3C7hEL4oga/tW6/zVb/thd/gir/4HN4eIg7JTh/0bUHudmb5enLShZ7S/670WE/eX07y+d71XM/zX+/zZ77oky/lIv/us07oZF3XQC/nnx/kqn/yn1322MzqkJ3zFe/ylt7qfU75Dl/6rU/0r7/qm0/bh//7Xh/8Tm7qY4/qug/RoT/DrJ/02Q/1Av+v+bG/17O/76Qe49KP9MWP+EN/+kWf+tffy6+u/e/P/SOP/T1d5+sP/6Pf7s/frcJf/k9v9QABQOBAggAECCiYUGHBgwQCPIQYUeLEAA0pXqR4cOHGhBg9QiRw8ONHjRxNGkR4kmNJlQtZtkxocSRGmTMzpoTZ0SbFkAJ23syp8GVQgUOJGg1a8ydEpUsr4iQq0GnEnlOZQo2KNKdWmFxbNl0K9qdXlVYfVjVL9qRak2xXYj0qwGFauWafRiXowGxPvVMdHCyAF0ABwIIJCwiM93DiqIfn+hVr8y9iwwctX8acWfNmzp09fwYdWvRo0qVNn0adWvVq1q1dv4bdmTH/0cWKC9umjHt20Nq6K+dufLsxA+LFjR9HXlTAAebNnT+HfsDtxuku4Sa9vhVhgejdoWvk7l2
89OwwEwxAn179evYMDiqwqqC60PJf66ucH3M5/Kny98fPr6Dz2COQQAREoksw5RREicEACYrMpghnenCgAQvEEL0DfUpQwQoX9PC+teqi67GpPgTgwgwL3NAuFF8Usa0Y3zLRqQlHQlHFFdlrsUPBYHRwRupItOpGkoRcSMcd1euxSCTpCzLKEGsMi8gTn0xIySU1RNBJKX/EkqEwIbTSxjKrZFDLLZu88ku8gJzSxTPHGtPCLQ3sss0Q3cyqTuWopBPQnXK8k8c8/83kM65EsRNUwjkH9TPFQtdjE9E9LwUzyEYpfNTRNCdl8lA0MX0zUhg3xbFTTj8FlUsOvSS1z0W1Q/XIWj0itNUBKh0101gVjbNEOVltlVc6Z+3KVGVvpUnVVIkF1VhIkbWPWvyctXVYBdVcUlpPf8XO2hGZvchIXCPldkdvVwVXO3FlJPemeCfKtVhRj2032XdplHNeieqN9t5p862W4Gv9jcjcZtNMoGGHH4Y44hAO6supyXbLqbfggKNNuI6xxWgyhB+6+LfYTkY5ZZVXZrlll1+GOebRMIZJY948vpnjnGluyeadf+NZJZ8zHk+8wA4qWjw4fWW61MGS7g48qP+jQ5E/pxQgz+ql/Nt3yK6tO0Drn/wLG8BI7UIL1qZlNXjcfrUVDG2Bv10b2LoZfdtHvOR+Vc+73W0b3rzV3nuvudn9W9/A+RVW76j4htvpxb0O1smRH0IRcsftljxx+y5/CvTMDe/bUs+v/Zo+0UFeWEHNCWf79BE1Hdzvwq1K2/bYO+e9z9VXZ/B13Tnfvfi4fo/8cdKTJ775cCu/EnjXl9/8edllpL1x2IkS3vTenQccTOSrz6n7Xr+3Hn1aa/deedwPf3Zy61LXb/ztgzIf3+vfol9M+4fHH/XuFz71KU587Duf+6aSu/aBz4AF/BwC9Xe7BcLvSP0jEwYHorD/crGugwyqmFP4YpWS+caEGwvaSRxjFpGxEGcZk1kMZThDGtbQhjfEYWxSaJKh9eyFNfuhD3UGwyECsYhC3CFHetgz5DTRiSCCIOrkB6Up6keDAknXitzjp6VFcXZVFNMVJaWrDYExg2bcoLLECDD0aMACb4SjBTRgKC6qEY1Q9CL27jjG9GDAAH8EJCAxQKkPddF46TskARNpHvUkwAOBhGQHCFnHNdpxf5S7ZJLUowFIdrICoaLkHg3pwIJlUiEXSkAnO+mBC5XRlFYUpSXzyL9tpccCquzkHF0Vy0r2co8D8iMuOzkgV84Sk8acHy9/2cZOXkACmwHBrgopy0WW/xKZVHylgNCTSkg+czMfkGYos3nGcabRl4K5UAUg6UzNfCCaxaymFMuJx3h+cZ58dKQw/yjJ9MCTlPK8JizvOcqgqCiYwhxkP6d5zoCGUZnoXE8HhGmBST60oeS8qDmX2Uhh6sifiPynPTNKz5DqEaLrwUAFVLrSCiQUlBatp0lH2qCN2kucMyWoIktKy5PaFKY7PWZMeTrTLGboozoFaVKRutQHCnUjEYNqVBMwsSOqMIhCu6pVq6rVJG5kiVjdKg+zKtYcltWsZ0VrWtW6Vs10dSFfJatbFXKYBtTVrnfFa17rehC99rWvB0EA0AR7QtpcEUYNWEBiFbtYxjZ2Af98dWxkHduQnyqVQU21rH0WIKfNlqiyTL0sQIFqnc46qbTR+yxmQ6ta1ibrtBChwANkO9sHRIBeAnjtQ2JLW9na9l9ySa01VwtakfYptxEQZgifclxh3pYAwRXtcIWb2WvlVp8U+K11hYndhAF3oNSUbnSJO6LXbkCfBphAd19LgfOm9yrP/a4YDaus007gvAbggHojYt/zPqC78MUpeMMr09HS57QPuK8BfLvciCD4vhB4L3SLO2ACU5e8EIFAgv94ldJmWMMRju8e5xuk0rJ3nd7MTAYYrNsTX0CV7qVsiO854hCV1gGdRLFmVhyAGwfymRLoJHdDJ+EKUzioFpbJ0WmRG0h2akbFBzmteQFpmU5WLMYBlu8dYfTa+25AvxEJJJUD6WUQY1nEWqavRJbc3C9DZM1iBuRvAezUZM4YzSSeiINVuWDM4XYiHPgjnBUsZyIP1ciGRvJbcsvfTorgtosO9EEAmV9Cy3imNAZTbgMAAU53mtPK3XFEOG2ZToP6ynTG5qXvXGPONqt0lTaznee55VZ38NX/LfSRD53qAutH05D6dQAyIAAVN2vOvXboPeWaELgqcazO9jMJoexC76JwsNZWTEAAACH5BAAKAAAALJgAIADZANEBhf7+/kc8igAAAACAgNDN3Ono6f0AANvZ5wB6esjZ20pAiTw6SM/l5Tg4OEY2fsMTLHorY4slVywAAGYxcrIZOU5XV5wwMKoAADAoXXFGRi5nZ6UdQtEOIdMVFaYrKwAXFwBVVdYNHV9PT7vS0gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAMIHEiwoMEABAQIOMjwoEIAECNKnEgRokICDTMOvKhR48OKICsqTDCgpMmTKFMi4NixIcuBEQzInEmTQsuGCRfedCggpE+JL3cSDCpU4MefPkemXMp0pQCMRYc+LUiz6syoBHNi3dgTadKpWwMQLXrUq0gBJJmqNekU6tagMa1WDYtQId2yZieOFbp3J968EZWuXdv2LtgADuRapas17F/AAPrelNzyMWDBg5tS7viSguK5YRu/7QoZ6GGsmz2SLm0RbWa1hR0fflD1goQLoLeKRr2adeqMv132Lo35NcrYo90mpilBgITcWHdHtZw3OEPrPFmbTmv8OHaDQTfQ/1QoAHpU6WSHQ/5ekL1U7YFdd/d+enr98QrNF0XPV/3l+um5xRt8rXE3X0nI8SZgAHEZQF5NjNklG4GRAdjfgulRWNyBCdqHYQgyPXhVhDqNpqGFfqE4mX/VyXcggu5xheEEIeYnUwR01VXigATGaJSKlbFo1obzdRigcDvmyF+KJ2J4oWEnGsihj2KpSF6OWUloYo9ActalalG+yBaVkl2JpUBLrtikYU4yyaWURZKpIgYCYHBmAAokoCVvBcBXwEgKyJZAoKP1qd2faCGg6KKMNuqoU4MKSuiK5FVq6aWYZqrpppx26umnoIYq6qiklmrqqaimquqqrLbqagIExP8q66y00noAorDWqmutegpgKGuIFnDArsTK2quwxRZ7rJ8KIZvsrscO+6yuywI2aVEKHKDQtUIpQN2QAhzA7U7ehjvuTeWeKO5W5a6LVbqQkQgll22uWG+Qa8p2r5elyTshvWzOC5+c+4IZb2h72pdvcgJrR3DDZvm75cBfAlexcAsrCLFXEvNIccEWg4wxwPpujFTHCpPM8L8fB8xyXihnqLLGL/t28XU3Z3ewbgnL3HLJNROXM3hDtyckSDH3l7GHJv8nMs5P6wxY0m7+vPLEDhctVdRE94twkj5nzbXRY783M9NBn/x10y2WLSPb4Lr9o9xVes0z2EqffWTaTrv/jH
XEa/Pdtt8ei014ylMH/rfQdMu59N6Lq3033F49LHjch4cN+OSXV671250jZXnkP1Gtpt5Phv7T6IVvHl3PeVtNM+mDA017SKbji3qKjR8t+udzU15RWHnCnuKvpeF67oqRFsosWssH2Tyf67lq/fXYZ6/99tx37/334JOa67S24ko+r80+7+z5xjYrLfuxVgtsswwkYP/9+Oevv/vwt+8rZNHTSLa2xa5vec5dUWlXADMCL4oxQEwwMlcBfUeR3PFrd/YS3upc9CIjxQ5mimsd4zL3QZvBqTserBoIOXc7z/Wud2GCYApPlzgWirB6L9TgV05onBnqroavw5sK/22WQ9XtEIID8OEFgXge49HQcLa74X94+BolGoyJ+3HiD6F4NSm2iIqZsaLF7BZEHYaEdYjLGhgHI8aRYVEoadoiEUk4RKGtkTCO21kZjXhG4FUJhm9CYhtxRsYmCvGJc4xiGk0oyDy+cSdxXCIXZ+dFcN0RNo5c4R5b+LsicnKDl9SMHw04vBAucoSK1JwdGzlKCk7EglecJNo+eUQZZtJ1hjQjSNCoyuqFcimDlJomc8nHXbYSkBT7pUpuyTFT9rJvqSzhKm3ZykJm8ZByRGUXT+lLViJzmNfU5Vk8WUnPKZM+38Rlt3plGORBRnmSko07AYOrR9nzUYCSZ/XCx//PfvrznwANqEAHyir9GfSgDMDVQReKv/Qdin/9I0C0IipRh87PV++D30QjKj+zIACJDFDIR8XkFHUtsCEKnKBJJ4hAbJGSIiMVU69iOiVX9pGc3ITmNp/pE5oeaKbUfFzqaHlTOiLSKz6dD1BJ+tKv4JSnmItmHZGS1O4stYNNLapUj4pDo2aTqki8ak2FyjtxUoSX0gQrBMUaJ5sa86lp1Sklc9rTsIo0qBgMUjq1OVeogqSqxmErCrP6Vq9KMpE7jWtd13pXprp1nIaNJWL7qtiQAPY1gu0hYSG71a/KdZblrMhlM5PZKm72rMc0q15SW0yIjHYwpQ3jaVcL16n/fhZyoYWpXQXwWkw+FrW15epth5rbifRWLbFl42xNE1zP1i6xtl2sTBuL1d/SNrJjzKuX9qrW6fIWr7IDLV2jCl3hSvdFycWjdZmLXTfKErfj/etujwvM5canuYflq3j9Ktr5gve9xI1vJ9tLSMjQdynp9S1ZM9ha4BJYmB69p4QVNdMJS9iiycvnaKZnn3nmBZ4bPqlwPPwVgpr4xChOsYpXzOJKjQ9+txoJRTv6Toj2b6M3xnCNfVU/hjLUxhrVMVJEfJABCoDIBmlg1lraLYUwmVz2bc0DqflkdJ0WltkNL3z5C0pvmhXL7p3sfitrTi83GMwF1nKAuVxLx3K3/3TOJPOAO5vfblLzzT5BM4T1u2U5d/nOX45zdMlL2UGXGdBnFrR5CT1mQ//unCcJZtf0SEyiFpbOkp2mmwNtQwFvEL+ZtvOmE91pNmu1vM61pJktDRE9T1rNZW3waiE9pmpSOpyyZi+ms6zGVRdXIq422oL1qtrt+NrTr1R0ql344Ff3GtGsBkCwzQZrBkd71sc2dUSmzZVhb7fY8aF1BPGMO2XXebixvraxof3rbZs71Oi2drvPIu4kMlNym5y3g3cdZk1Xl9xIezevxdxnR/951NEmHjv1+VDoxdN5DU+AhS/scIi3qMUYz7jGN87xjlvqxeyLMVpmLOQPA5l9OP8O8v8avj6VtxzlJfcJkgti5JkTRMk2q3JLUvquKFdI5x3heQKvLPB+d5XfaQZwuvXN7R95WzUAnzOqz53noied4Gv2s1ObLexbw1GLVGd0wRfNbKTvGd+V1vd1ze5srC8d2cC2+tnjTexc35fr1Abn17EZ9rJPHd7PLTTZS1lqrZ9a8MuWOuL7Tvh8w/3Sfx84n7Nu8GQXvvKHb/TgP433bnsdkmAHvNgpv/mts73rj7xJJEXv98WzXvGaT3wF5d72yb9d25yNvNHhfPnSZ37ssje97q/ezN4H//ekPz7kXS95tONa3Xc/fd7dLu/Ht5r2qFd69XG/7+HPverGZzz/56Xv+WrXPeHYn77tt2/45cde/JZ3PPfX7v3aH73+2VcnuRZu8QxXnDcclh4k5hUgBoA2JxUDaEwet4AM2IAO+ICskoA/YT4cFXMEeHLnk3Iwt3IX9XIZiIHkQ2MU8jsHKBA4JzRAJ0BOVoJ44nMryFIseIIjuEHg1hqg1nx09212lzU1WCE3uHuB936vN4Om0YNohXnuB3zwR4QVYoSsBX022HlOx4Se44Q/SHz3x3xASIVFuIM+KIV1o11Q14NCY4Vg6IJPqHZcYobkN4Xmp4NQaDNsiH/ql4VCiINc2IVxeIS+l4TJt4REGHp4OHq31365p4VYmId6qIbRR4fl/6d958eIPOiFfKh8h3iHW6iIgpiJsKeEQyh8iPh9itiElJiG1td9oWh/o9gac5iK+bd+kXiKJ9KKmJiIQeiJg6iJfPeJyFeISHiJuMiJXLiJtkiI7PeLqFiLoqiLtBiMxdh6yqiKPiGB00iMPEGNIVGA9hGA/YGNIKGN6cGNx7NPEFiO5niO6LiAIHc+IreOIWiBSBEsGbWBBdBjPnZQ8DiBIDgtGviBHGgWMagtR6ZSXJKCDKQQU+ZYK9VzEsSQ1rR3zdgf9VZSYhgynCZ/hpiM4chKT2eRpIaRyEh/AMiRFSkcUTd74ceLfpgiE4mGV7iMjZd2sqhrG0aSbziGH/8pk/NHkyP5X9QXi9zXdGF4kxbTkuvViK5Yh7wHkn0IjBJpk5AIh0yXfo/4k15ilB1pkhepkxkpktsIlVaJk+iXkrnYiU/pk7AolacolC5pGFhZklCzlc8niV8oKGCZlmI5lWQpjKD4lWhph84IkyjJlJaokWepkHDJEycZf1wZkjzpl4hJlFqZk3M5k0gJmdWVlXFJmRBZir3zlpK5mWNJmIDolCx5l4D5hyp5fXv5jGZ5mn+Zg3m5luzCf9QTcSxYJRI3cfaUj9X4f9uYm765WulYnMZ5nMjJT95YEcHCcurznM4ZnR0Inau4WlxyIth5ndpJMdlZndbJnduZNd3/KZ7haTPeiVrlKTTjaZ7pWT3reZ5Rhobv6Z7t+R/zWZ3xeZStcZ8twp/g4p+6WJ/9KaD/SaCeA6B5mJ8IukEL+hUN2kfn+Z3kCZ4UOqEWyp4VGqEKaqC/86DG5KFnEaGmwaEMSqIOaqIQiqKBqJ/yqaIh6qLoCaMgKqEXqp4yOqI3Gh8iqqMZaqM9Sp8/ap85WqNCGqQDaqQFiqQHuqMVMqRN6qQtqqQdyqQbKqUlaqUniqUpuqNVSqRF6qVHCqZJKqJdiqFiuqRa+qFO6qNm2qZs6qZAeqZTyqUsqp9PmqYviqcxqqcziqN86qdyeqWBmqUfRp07tpwU0ZzS6X+I/zoRijqdi+o5yTmplFqplioq7jgt7Uhy/3iohuqpLDeP/uiB79ipQ8YuAhmDUTqoW7pkqtqQQ/dbbGmnqwqnsumRo9mYTbmnrKqmoamYctmZe0irxKqZwMqZoLeLZSmothqmYYmrekmaq8mrz
TqmzzqZuVqZO8mjf8qtUTmbQUmVbtireUquhumLuzqrfQqo1QqNgSmN5daagkmo7Tqn3wqttCmv8Iqm5kqj1yqa0aqrhemvb1qwqYmuhamuUFqsiUk0ixl3+vqKcVqvzIqX+BquEauUzmqwX/qvx5qtwkqXtcqxt6iay8qY2tqV7Eqy1mqx2BqwKeuY3kqx9OqyAP+br9J6sq1Ksztrsx8LsyFrmfu5sMbqsMGarE64rjPrs0aLrDtnmx32qfQ0nN9ItcypYQbIcBd3qVzbtV6bnJn6LJtagaZqcmVrFo/KqOojqqXaqBKRtl4RkATkkES7sAaJUiPrfEG7rUPbrX17r3zZs3qLtKWotH/rsftqr/pHuMNquHcKuK5ZsYOretaYuJLbsSzrriq7tJPbEquns77aryt7sLtKsJ3bEZ8buKHLs6vLtBKLuaerEakbuTU7sZn7mqU7ukspsKVJrbd7uSUrs4dbfDmruuXKusfruhrbsrGbEbM7r4ILu9IbvLnLubsbs9U7vLa7vbdqvL7bvDj/Ubmvu7HcS77dS7vRC37Fi76tW77Me77Q277Xu7ebq73T+77UO7C6q768O62me7/8irjji7/xur7xm7zuG8DKW5W/i7L0K7yPi7zfC7+WW7vzy7giy7B+W5eN+3mUq6ze+7/mO8LGqL/Wy7/Ya8L2S8IKTLoqHMHEiy1QK4BSa7ZuGxFwO7XAScOR+jtf+8NAHMTqSFFjm2Nne4FHHI9Wm6hL7Kj7+Cz92LYAhKpzG6uOm7f/cbcMIXQuJaviOpSie8IErLkI27sKu8FYnL9qibEGXMHpO8aKu8DB47SeK77L28IAHMcuvMYZecZhvMJwPH6OOK6L+8FJW7e/2rQg/4vBQgvDCazHFEw2R2vIhYvIkPuzONu/oIvAeQy8JczHyOjHEizCgdyX7zrAAZexDPzInozHagyuffzFaVzKb0zGx5iusqzBf+zIkbw1k1zHIMy+nMzCkPzKFxvLqkzIDVzLxfzJsBzKuXzFuizAvkzHqGvHq9zJFkzMgpyU2YzCD5y9vMzN2+zMxwzNyQzGo7y/tLySt1yYDvA18YwVDtDEb2vPOIzPEJHDZis39Qw8/9yoQjzQBF3QJ3bD+6zP/Iy2+gwAC43ECO3QDf3QSpzEFXEAGJ3RGr3RHN0nTsbRIB3Ss+zK7RyiwhLSKJ3RD3HSKY3SpyW3A0m3aDzNKP/4qlpcZESXziPdzCRty0CJzG2Myjw91K2Muz+NzkF9x0Rdzj1t1KCMyzpN06zM1E79zFCd1N9c0hNMzr34zmYczZa8zpd5yko9mJocwuzc1EXdzdEo1Gadwr1LympN1WxN1lmdylitzFPNzGttyibrvaK8zPLL1e581Fd91sK81Vot11V9zocN1/6b1kvN135dxtMa2Hs92Iv9mHat1xdMyR0800UrydYsu9js2YQ9zJs91n+d2JitzZQd24X91AkL1qK9sC/pxu4W1dIszbnt1g7MyHw7zqsNyD5N2707z0Gk3EUR0DXM0Bb9mxGNKP7sI879Hwad3dq93doz3Qr/PdHgHd3ZGN7eLd5Va94rGtaCrdoHytItDdIr/d4ubafrzdh9Ldvs/Ts3nWQvKNNkWqe9jdtnSN+Zrdhzjd8G3tgv28j1Ldn3rdkHPttW/cINLsYRnt8PbppeHdl+u9MZnuB13doHPIJlWuEWPtkS7tgUXuD2TdcujuBe2dnqDJ8Art4sHuMirtszWOI37uAvnuILPtwmTtwXDuI/ruGGveKwjeFHbuQwztk5DtxMyONLzuRPztqWvckkXuO3PdO/XdZUSOWp7eQQruA3K+Q9btwobuVlHuJZjtYdLtVVTuZIjtwc/sce3uQt3tVJHtdDmudXruZuvuFabriA3uZ6/w7lb57Y5Y3QFK2P6M3Eke7Ek37PAG3dTczdmr7pnF4qjd7DNvzcEC3qFf3pgCHfIe3R4YLq8C3nY77nH+rerI7R8T3rGv3SVBzTVmzjc75a+01z/b3rqQfMh9zll6zIQCvc9Uvka07n50rogG3bu3zodT7hX83bvP7qig7tri3tYi3oiY7l3D7ir63tJ/7hOL7o5O7tQ87s6L7tfX7Z7J7m4B7oHOzNqF3AiD3isM7m/v7s8a7l5V7c7h7uUdiGMw6+DPG8Ou7s/e7w8G7nAj/vve7j9l6J157XCV/x527w997WYA6xGk/tEN/xfC7x0Y7txv7tHy/jOT3yrk7wbf+J8C+/7w3/8Dhv8UB+5kBt81KO6BdPrF9+1zEJ2YWu8/+e8+Ie8N7L3Nfk9Dtx3ZA69WrbcNV96ZPe6Vq/9VxfKaZe9VQPqmGvw18P9mbBAGif9mq/9mzfGra+0SSv9PUe62+v0j0h67N+WgeWEiGl610c4DP96zcX7H9vYLtV7NMu9ANeGnuPEjO87HGP9ACP8ond+Cfx+BAc+Sa/85jMfZZvEpgvzpo/99Wu4r37+SUR+i88+gV/8tY+rag/AKrv55uf9JKf7uPe8LE/+3fe+kD/+6Uf5Bm5+6e98eZO+iV/8INs/JZ1+JW88nL8R2ZF/MHM77d//bU/+a+v5dT/j/gsP/PLr/fOH9qJP9rVHG3d//zl37Ckjf7jn8GAf+ztr2/pT/7fr/g0/1v1D//Zbs7Cj4wAgWDAQIIFDR5MIEBAAIYNHT6EGEAhAIoVLV7ESHFiRo4ZN3YEqVFASJIKCUREGdFkSpYNP5KsKPDgzJkJF7Zk+RImRp07Lfb0KTKoRwEncaZceRQl0JAyaT4daFPp0pFDLzL1iXWnVphJpz70+tVl1aAIzJ5Fm1YtghEKHYht6EBhAasVC8ytS/GuALp59/atu9co3AByixJmaBjwVoWNHT+GHFnyZMqVLV/GnFnzZs6dPX8GHVr0aNKlTZtePPSvX7ys+eYFsNp1/+qgsgO3vv066AHevX3/Bk5XIXDixQ9wLUk2L/KQzEFOLGBcOm/o06U7t6gAsYLjArQTVoCdo3iisIWaH/4dbngBB9SLZW8VMUMCCudLVF6XPM/8VvdfPWy+sODa7776bkLsv5/6G0rBihwUaTDCBhSrwPkOvA9CADTkkMGgKPwKxKksRAxDAT3MCkXGzNtQxa4CTBDGCV3MyED7TmSxwxxpbE5GAn2skMeLbERwxh2PRE/IjkRUismjSCTMxBiRhE3HJCX8Ecsg5bvwximTpHI5JccDMsQyRxyTIiIzTJO/MPVrE0AtzZwTTS5L9NJIMPesMs4F62zyzEDvjDLPH//f9M/PBxWNkE1An2xzTRz5FBPRBgWF9FGcoIRLSj37tPRDRlvUtCUnN420yyIPpRTOUFMsNSdMUSW0U0O3bDXRVxmLFalZTU0Vz1VxBTXXS3fslapkVQq20GHN3PXFaJNbFqJTga1VLE9ZLbbbSq90lM1sv9qW2G+9dRXcE6sFq823hH0XLsVgs82qelXDzd5rUzKM3bjyffE0gQcmuGCDD0Y4YYUXZpgz2ny6F2KAa5tYYt1ye3iniC3OGKaNWXxOAAZGJrlkk09mwEp0dTVW1GlBBtAp
qGZCQOVzb0535WNhTlHmmQ2qeVSbc8aZZZ15JtPnATKwoGmnLRAB6KH/jS5656NTRJoxnyswoGuvvZY56JdDHntJoc/O+sWwO/i6bQ8KErtlrOVekW5p0+5R5gza5lsDguK+uu7A7x48Obzz/ptvvjuQCfCqXbbb8Mh7PDxkpyxQnO8MBnKcaM+p/tzqxytvUSYNMldcoM5BZ1300CEvPG2FZNq77QskkAyE1V1vHfbRBf/98Nk55xt3yT7Y3ffX546d7MmzHn6g2r2+PbIPdJ+ad+2V75150mP+m23UDXib8+y539775YH/flGZuR7fqeTV7579+gkPHu/oCfIA9c3/Ph/90me/AeKvfSLx2ekUxzi4BZCA6IOgACNYuf0RRAMVwGAGK+C3/wai7XlmKxuZQgizCv6sgyN00wdFqMLyHHBBSjPhAOb3QAnWkIY3NOABSxhDGTowhwWUXPNAqBp6zW4tR1RLxTSmRI8xkSQfa+LF9CVFfFExOQ3DYha1uEUudtGLXyxNx57oRJBAcYwCaEAa1bhGNrYxjQpxYxzjqJAEFNGKHLOjGPXHqA41YAF/BGQgBTnIBcCRkIckpElYmEIh8gxCHVoAmyK5rkUCCIWXXNSOJhmjTc6IAJVcECYbyUgxdbIhFHhAKlX5gAhYSwCmZAgqV5nKVrbrk6Mk5fqEx8ezwTICqIsXQxTiS9S58pb5oxwocRnKJMFyfBRolzNRB02HKP9ymcy8ZjZbpMmHbGB8BphANV/5EAp8M5xjOaYug4hM6PGSmw2ZwDcNwAFxdjKe33yAONN5v3WqU3bubKZDHiBPA9QSP50cqDwhgE5lnkeb/hxinzYJAYJ2bSwTragBGPpQH+6xoS3aZDltZzzIYOCgpxzpZExqTXZGtKWOBKhE41I8ypw0MW0jqWTws08gJvOh/HSeTBvyy69VLzIrHWdDvEm9nD4GqTyd4A+j6lGOJrUh8txAPR+CVX1+tKP/9KpVGULUYmrVIWTNnC3DKsqX5jJRsEyo4gwqTLEyhAOom+tO1/pRoLoUTrC8J99C4ErAZo6eaq0qWyG6QqE+BAI2j4XsY4NJV1gGILKRnaxeE8vXnvr1rZJ01F5/2lnGlhK0lNzsaKcaVNOeqLJogqoNpSrbjAQEACH5BAAKAAAALLEAGgDFANgBhf7+/kc8igAAAACAgNDN3Ono6Ts7O/0AANvZ58jZ2wB6ekpAic/l5Tw6SEY2fnorY7Kxv8MTLIwlViwAAJkyMlJVVWYycrUYN6oAADAoXDBmZqUdQ9EOItMVFXJFRdoLGgAXFwBVVbvS0kEiT7UkJKqptr68xQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/ABMQGEiwoEGDCQQIKACgocOHECMWUFgAwcGLFxMutIix40CNDCOKFDlxIYMEKFOqXMmSIkePGEGOnPmwwIIAOHPq3LlzgUKaQBsqRHCTp1GePgUQPco0Z9KgQBUyGEC1qtWrWBUMLdr06FOoM7t2/Ql2pEICYpueTcuUbNmIChNgnUtXqwC0bI26ffswr14BfOHe9ctzLWGdewMDiEu3sVW7eA/jTBxYck7KfA1b1iwZ81vGjh1DtjwZsGKHpAN4Lsv5cGvCq8GCDl33td/YUFPjDmo7b2+2u6MKkEu79mDSwWnqNn168fHNzzszPz27+NXRyKcrXt5caHTX32Fr/w9c3XpV7NC7A+De/Xda92KTzyxvfgB66erZN4c/Nvzt8ZkNV991/KkF4Fv6UeefbwsCdyBrAg54XoFtPQhWgopReJSGf6lHn3n3gZdfdh42+J6J8VkI1YfWhSjeiOm1h2J/keEnI3ES2sdhYSoGhSF5MxpYo4g35kiVi//BaON+QVY45ItFGokkg0oSyeST/2FJZZQ5TulglVBeuZyWX3IpoZcngpmkjGSe2GaKJeJ45o47yTfSjwG+SSOen0XYJZ2I9QgUnxDqKSShsvk5Z5MbCqpcdiEpVlICXIFHKaTNTarAppx26umndl0KXaSBlaTQqaimquqqrLbq6quwxv8q66y01mrrrbjmquuuvPbq66+zCgRTTBRl6tKwxG6EbEbFnlZSRcseBNJL0X50bLUEiXrYV0wuJVlS3m5rp0hbWQZupX6day6iKzL6l6EVlghvo/O+mxq7vLnLY708yjvmv/fGKGZ2/Nbp6Hz6GlxwoE9acMDDED8sQcCB+kswvsItfFnCDPPEQcQgU7yxxdBpXBrJ0pms2ncSgAzyBSKvjDJ4Ko8rWM0cbzykyy5bILLNEAGqM8Y0CV0azjVuwDPIEfx8sFk5H000wkgDHIADS7s8ccBAP2T0yjjPLF7VOF2QtcsUd+3Q12w/TW7UYFttdsQYTIABz2m7fbPVSyr/SHbJOWEd8QQCTODyA3mLneXUUP+dsk5KQ3wq004rzmDYbPJN804RTx6xz4lnfjGJogM+ek4tP+z5wzBXXvrjpA9s+uw5fXzA6gfELPPrm8fut+Zj8+Tw7QpBvLXrssMu8O+nK8+jALpXzHvwvmcId9tknhr9yNMvXj2Qjvf+F/Tb7568+H1bHz71RmUgQAblr0wqX5Oi+5+24M3/Vv3Z4S+e//8BlgAHSMACGvCACEygAhcoQGFhiwAycda1HjitB1prIcYyCUs2yMEJYiuCkrIfW7iloHARRl3fUpt3piKlcqVQbxBh3NvWtyYmyWlAaIJTd2S4t+ZZSUE3rE8O//ujpi2dj33pA1IQQdS2IpbpiN5bnvWW2KIm7vB7eQJeDYFoJB1dD4Z9wWKfaGhELrbwi05MU/cux8OgKQqHVmxOG732RczZsItDNFAadQhFNooRQlQsTh7jdUUpgk+LZZwiHuN4mjmurY6O9E4gaTPIRu2RiGt0kB3N+Cc0FjKJWfRhmDi5qE1u54+JIuMTSQlHT8oRle1SpRrveEZTVgaW+ZIlH1kpREae0pChpN0PFVnLSK4HlxlD5CqJ2Ulb8sWY2IPmG3vpykYis2iQvCbCJhmaSnbok/nLIAAZNE4H6a8smgKVOj8VFxGaM4MMjKc850nPetrznvhMFQf3uf+SZknKg9WqoAVBWCqARkugFPRnqRRwxhKZMF0uFBcYH/nQvKBQouphaCctp0lp6hKTzdHoojjqJo8qc5anEWkrSZoiZ47xpLtUjEqpydI9abNxMAVpShuayZLedIY51WN3ZsrEiXrno0LtY0d/GhGiVtGozkEqIZXqU2C+xamCVGFUgzpV5glzlDLlKVVbalJRbjGsG+0pWZnqRqlacqhi9arzhnlIsyYyMFilpFajyVY6uvWbIY2r+rj61rHa1KplyWs395pNxKaSsIDdaVoNe6i+PvKv/YLrZOWKProGc65gxatg6/rVs5IWtKbli2JFw1jMGqymlXUsVFbbGLv/wNZJZS3tXVU72s92NrS+RaJnr7rO4m5KofRrZ//ceaJzgoV/0Clnc+GZz+pa97rYza52D+j
ADyJ3fwZdFkK9i0EJapCf/AwvsghKP+Z2hYTWq+gII3pCxrKwk/JNC3xvKdtcQjaztGymMXOL2t32iZusreYvQflSuy5TiYtUMH8ZXKj/vtZMrXRpWQj829QeOMIavpBlj2ph6QW4lAMe8VYdjFJmoljFHBYucD9cTBirmK/9FQ6Ca+vLCQ+3wiyOKYRrnONHFZlqJebeiTOc4iPjNMg6dTGTbexkoEI5qbwsaohzc+PGUjhROzbOln3UZdeaOMtPHfOgypzkk2GY/5pqNvKXY9lm86E5qxJ+Jpuv3FUpw7nJc/Yvnwt7Z73mGUF71u2DAxTmuXgTwNYclTjd2x/ppsi5UEmncdepXEmbd7ugDrWoR01qfHY3oN9Fp3qHNV5Ul/efyhroqmHC3v1Rmin7DVB+xXLR+kKVvhBVyq29otUYRzHQyRx0ZBf848cqG9KDffaFX1nlHiq6xaftsIE3nOgCL7rB1xaynqvd1jq31txQNbYfye1XdF8Sy9EOd5SzLWMPc5vdl3W3Wg+LbDk3m87SPnO8vY3tcfcbyQFXMmfrvW0R45vECXczZXFL5YM/Wd7wpvex/03mh68Y433W+LotLhJ1L9XjOP8meURMXlWVlzvidmb2jIEMckIPXNvfvrfL2w3zc/c83d3GecHBTXBxIxrlXuZ4smu+bB/P3NlMh7bIT75z1GDKvJau9LAbhemgQFc6WTdQ14FiqlKb/exoT7vaXXXqg6b6ubP2SKvd/uqCxjqhdyfv2GliE3MxdtfvBbZF/751pAh+vkBHupkVfnOG59zhVYd41Ke98I0/ncuK1/fE6QVopWNz8RKv/Mg9H5agO37oNC/6vA1O+ourPuPBtby9Id96K09e4FNvee1XbnrZNxzgt2e802cP/NeHPPajv3zHI/9x49s892v1OMujz/yU7z6GvU8+8QUd/NDLfPtLd37/05FP9euHMfM/vy3nK27+fKd/37Fl/vT53X7Ji1/qrFd++IVu9NTzf/VHV31Jp3+fp3mRJoCgF3PQR38EWHro130KSH6614B3cnWwFnYVgoFcN2nLZYF2t3YgGIIiOIKmZkEQ9HaZFncdARIngV77hIJep4LJAi2yVnft5Xe/Jmw4eF8jJSOA1xS9FmzvdnxEZykRpn72wn4UaHtgd4TwR3HSl30O0mhZ4XMQWGxSeCJUSCA5mIBY+IBN2FtFeHr9R3tLaG1huFmN53uPh3kIuBxb+BhWeH+U932/x31p2IObl4RRCIZGKIb+R4YAqHP113x/qIYLGH+FOH8GEocT/9KFBmiHbYiHh6iHold+Z4h9fvg/TriH+9J5mchz0dWJlziB4Odvoeh+owiIUEeHuJd/p4hwq4iIEkh9i5iFKeKIRzKH/wd7AViIbaOLXoSEn6iEseh6lbhST7h+ffiG/UOKa6h9d7hmnnaBhccjCbBpxQWDZNdpYHeNBrN3M1F2JFiO5niO6Igr4kgS3Mh37TiO7zgSz5JB68iONghe94hOMlIi/LiP/sgk/ageChKQA/mPBQmQBimQAUKQ1sOQQOKQC6mQDZmQD0mREYmQGCmRfQKRG2mRHZmRE6mRH3mQJBmSJVmRICmS+cKREMKSieKS7aKSI2mSNImSJ3mRN/8pk1ADkyvpkS3pky+pkzEJlEOZkjhZk0cplDtJlD1plDNpk0iplG7Ek8JBldhklQgjlVXJlFvplD/plUGplUsJlkWZk19plmEploKBlWOJlmUZlWeplnTElm9Dl2vJlVcpl3dJlk3pln0Jl2mpl95hl1OJl1lpmG0pmM5BmHOJmHXpmHupmIsJmYXJl13pl5epmIzFmI/EmYNJmTVhXvUoEfFoj6MJEfMomvS4mp+Wjq75mrCZjm0nXqVJmvkId7eZgrkZg3nnajSId6dZE+DoFJsJmp3pg8OJE0E4eIkngJ45mZ6oMKB4jI9pmXkZnR1jjNN4nZjJndGIidTJe0j/95y8KIi+SIip+JnWeZjYOTTaOYmZCZXyWYsMGJ6a6JzGqZ6laIvpeUzjmZ/QuZ/1uZ0OiJ/rmZiJCIXy12XkCYnvR20G2p3sKaCK2J/S1KDEKJ3viXqBOZ9PSZ8Vap/nF6GA+ZYJyowL+p8HWp0UqqC3qKISiqAg6qIWyqAAWp5syKFuCIw52KBe2Jw8iqHLyIcpSqIeGpcnSqQvKh3B6RCpCWtN2hBPanca+BdVyiNRCgDkGJtc2qVeOmqzuV61iZpjWhNl6qRnKqUyyCy72Y1tynfJGQC59qF0iqS6Fqdzaqe/2J/FuaJoGKTgxKc9eqOHpqfoKaLH6aeV2aLP/5eUsEigMlqnHTqjjSqpOyqoQtqe3nekZoio+lmif5mk+GepyweoitqYmhqBpEqNRuqoq1qANgqhphqjLPqdOhqqjwqf3smpJkqp4/eqBTqroBqfvjqqhtqpkFqrvIqrY+ipASqJt0qswBqpgeisX9iq06qsxVqHy8qqwtqtu7qtr5it4omtx9qrzZqskQmtZYiu58qs1aqui8qugzip5Dqvosqtrnqo8oqqtLqu+Tqu7+qtmEqoGSqw9sqv76SaDAulHFiNdpelW/qlFFuxFjtPYcpqaaqlGzulyfWm8NibdPebepcpeNqn/4qvd4qDP4hr1/qt+5qwrdiLRKiw0f8arvfqr7bKn856oQY7pMXYjDCbs4nKqEpaozA6rDgbr+DZr1aXtOA6oTs7oLoarAV7qkU7tSHqtA3hs1j7qQHrnkJ7tSmrs2ErNRvarvAqs+46s+ZZs8hatdTKtmtbfDRbqTarttI6sEvrtjmqt6horYP6tYb4tngbtzcrtVE7t3ZruL+at/XatpJbt5TouMaKuICruDE7uftnufq6p4KbqUYbtEU6tHyruenatHJbcrFatll7tnGjYg5gGQ7QsbYLsvJ4uyZTu1HDuxJ7scAbvMJbQFHqsfj4u7hrmqzpsA1bKgjwvNAbvdI7vQiAskqLui1ZEdS7vdL7E9rLveD/q1UnO7iuC7Yr+0ItS2xASrbXy7iV+7eRW6rsu7jamrqmyLX+aa50u7d+K42rW66mu79927jwe56YG7/8y7kD/L7+m7gVCLWbS7mdW8Bwe6mh+7Op2mO56sD1e7ruO8ENnLkPrL8KjL39q7ocDMDzG8EJzMAoLMKsC8FEa772y7P467Xl+6ywq8Ggi7/WS79/6rkIa8E+TL7ty4R3+7gHbMAl/MELDMIvjMCBW8Siq7U02rOte8QAK65iW7orPMM6zMVoO7YXTLg4GsJSPBOzKxm+u7wR68Yfu7tG08atObx2fMd4DCvFe7vI28dwfLx/rI9mbMRAPJXfC77b672I/xy+UIXBOUx4LDu+kvm6LPzEliyLrih8evnDlWzCBIzGTCyVnAzGZxzFoayUo+zBHXzC9/u/GpnKAuzJLtzKKfzKhNzJTozJSXy5WgnLTbzKn2zKFSzKt0zKDnqFjTzIVbzDhSqWvizBl4yMQqzJcvnMLXzNsIrMk0zDqrzFxvyj2xzGxuyjkSiY1hzNwDzLNuzKEnnOsozNujzNm7rJxd
yIzXOI2suI/GiI40qI7LmIo8+IInAY/F2I89+I9DGJAM2Y7z6BEQqYsS6ZCMmHNk14zp+IwGOYoISY4nuIlD15AaqBIb6Yod2ZIWWY8fCUsimZAk/1mSMbmCMwmQNTke0ZeR71iSJumCMvmTiiiQLMmTJPGSoriTqxiOI4eUcvc1mpaNRMmNJ+lpLLeQt7eUUfmQCjCWZFmWZnmWHaAQUzgpVZiGaNglh6eFsdKWXRKGdnmXeJmX2aSGVmh4b5lOf9lOfLkhfahdjFeYlvd5PkmRSaOHjseHdKh5pcd5oocnpteTFUmVjrhPkBhQ4lGCOEmNVBmU/niE+FeK63iKqXmR+IiMEwGaKKmQtLiSGFmaAGiD7riaNMmYNjmQI6l7CoiJqumVi1mLJgWEhRSAHsmb8YaZ4niTsblpHLiVHiibw3mPtiicDQGbXJmSu2mcPHOVG/9IKtSZlCqpmxjom6E5noPSgeb5nb6YnmCJjewpKO6pmJnJnLM2WOLJgsxnlEypn/j5nOoZncD5g7cIn+iZn+D5NP2JZ6cZi7O5oCDJc7U5kaYZnKhJnAwan2wGnd35ZhGajxxaoVIJlOMYotJJngAallTZda2Zm6+pnEcpoDCanRtKZTQaoN9YnB5KoPNJkKXWWq5Cl4C5hn5pa3JZpIAHa3r5pFAapVIKJIMJl4GpTFVqpUjalxsybonhmIr3nj5KoVPJdFa0mQQlph1Kps2pcjgmodcZp+dZonERdiEYOngKAYCznieqpiZqdCEJJz4TM795fNZpGnYqfm7SMWX/Y6CG+nwbF2Xr0jFUo6KiaaM496cCGiz70jEHQDSO2qcDWqbqkagp6CZu46mjY6mPeqmlSnV44yZh46nIEqrOSao/iqtgB6tq8zww8jLcsqp+qqt02qZmCkozIjTKAqrVCam3aqyvikm++iLAqiyVOqzQmqvZuqtvSquN2qyuqq3iVkJ9Ui2Dyi+FKqpdmRumyokswy97yqrq6p2U0a60GQB8wy6Qk67Piq3j+kXlSisRMLAEGwGVaqsvSq+LYa8GSKITWqzY6qV1hpyaiqlsJLGxqp0KCrGjmnZX6lxKOpdNCmwji2yIF3hZqj1TurIs27Irm7JYumoGMLM0W7M2/3uzM6sQOLuzO/s9HwuySVoiGPsTBsAARnu0SJu0SssAOsuzTkuznQKm1+av2UaumMIA5/K0WqsQHFCZekK1mVqx4ioAWEszWvu0CjEBzgM9Ndqja5puvHoWZRsnECABdnu3EhCvnna2Tpu2eFK3K3K3q8qn/Vq1AHu1cnKu2xKFTcu3OOu3ceIzcHMAQlq4YUusclocc/smjDpYjtuzAqC2cNKpBzC5H0O4CRu2DBsVmxsAk+owB9u4n1uzkPsmjNM4ArC4qGuxmbq6l7K5+dox7oJms3uztYuvyjK5n7q7biu2U2dFm5uqHbOnslu8BnC874q7wYqwvItzvqsbc/87q7RKvNYLtaFrLFHjOMqyr+DaqloXt2kxt7erLC4DM9tCNdVbvLU7v9o7NfJquZf7vYExt6SbLECjrMnCrPk7u7UbvMhywNzSqGAbwPDrG5uruMp7rQJQvuYruotDv9WqLKY2wapbwW/Ruumbu8lyunvLwdd7vnBCq/FKwr1rwqfRuuc6uZ7rwscbAIrLLfyaujUMvXKSqnCjtwv8uT0cABngMIP7v0LsvTYsF62LvP3LvuTLwUssvstauVH8vkQsJ7ayIrUyhUnsuEscALdyK2bcsf/6Sn1SxVA0YGfMt2ksow+bx7AhwJprRHS8wTwMwxo7psU6tHI7KM8TPbcl48Iv7ME5mrmE/BE/22q2JsdsWcdnqxAbILIn66SdTLKfTBEBAQAh+QQACgAAACzJAPsArQD2AIT+/v5HPIoAAAAAgIDQzdzp6Ok7Ozv+AADb2efI2dsAenpKQIk8OkhHNn7R5eWJJlh5K2S2FzbDEyyysb9mMnMsAAAwKFyqAACnHEHYDBzSDiGqqba+vMW70tIAAAAAAAAI/wALABhIsKDBgwUECBB4sGHDhAsdSiwIkeFEiRURENjIsaNHjwkUFtD4seTHkBEvYlR5USHLiS5fOowp86BCBAsC6NzJs2fPBTdz+hzqE6iAmkhZ0kw6cClTp0kVEiBKtafUqlgDQGXKFcDWml9lhn15NSvVsmaHju0q9ijbtSrhthQwNa1aunbvst3r1W1XuTD9ckWbdyfhwloF842qmPFewBIPF5acF/Jim42RWm64GXNdxDop2+18uWlmsKfbPsYLOjTr1qRLxyY42/Tqz6BFp63Nl7fv1GRf5xaOmPfq42+BKyU+mXll5aVnQp+L/K/z0dd3T49u8Hf1wdnN6v8Wv5077fLSvz8Nn3V8e/TmvSe/3do17uLwucu3Tr++e6zGzScgf8nd15yBz5mHmnqOFVhfYghip6BqA4LXH2zsAZhfdPtZ6KB/GVYVIIEkemhdhNqhSN5eFiVVUVcvchUjUxAlINRwNoLYIlIQKeTjj0AGKeSQRBZp5JFIJqnkkkw26eSTUEYp5ZRUPrljTTO6KBKMW8ooEkkmhUkASlfKlJGYYibAoGYbFvQbTq0ZtRqcoC2w5oIVrqfiexeCdieFJer54G8IPmDooYY24NOfwTG63J4a9qnTAwdUammlESyaZ4OBMgapiG2eZ+ClpB4AQU+OxhWqbR9iKGkEpV7/mgGqm7KZ6lyfnrVqX7hRECupD/B0a2DDRhairn1K8CupwtaKZ6ds5koUoTtBsCypGOxUbHrOtiWtXq1mQOoFFVwQq7bdNpruo4Puihall1YgQAWlSoAutM+aCN63PlG7E6k+lqqoTtty5u7B/FqFME/WWhrwpdneq+96BXvWrqTKVvqwpbTiC+jEnl7cqk6+aqzQpcE267G6K7MLoqQBwHrAxrN2DLKt66qaME/+9hRBBD7+HAEFmrasasXdHTvtwksLUBXSbh4Mc4IjK+w0VVCfl7VpOxvG9FAWCGDB0zlTV3ZgXbs2tV1lvpQlj13SGLeWAuSIYdss1XgjflX2/+3334AHLvjghBduuI94q/Q2lnPDnZLcC4GJ5klfTm4SmTBu3ZfmQcXZeZ0j3pwvxVUfKPKER59t7Nopno46sapze2K7aSf2eupGmz07iLWHTnrusO+OYe+7ci516c8Rf7vuon8sKO+uL4+Z8ayTp7z0sjfP8r60R4+91rEbXP1713+fNPXIY1e++eADv3r6rb/MftToCz/c+vN3+Dv30Ms/P6vuy97zhuc98+mPU/wjoP/+d0Cc2Q8/+GNfA0cXsv656n8A1N5yxqehCPKIS4+jW+Iusjgz/YcqDTghUVIYQscd7oUwjKEMZ0jDGloJhCOcSAnd1jjGtdCHOcRID//NVD8NHm0kCEiiEpfIRCa6BIlNjGITfYfA/VmRMXRCjFGyWBg5YXBz4Zse/KxXwO9N0HkVVOAFMXjG7Q3wfmXEXhs3OEbyxVF6c8RdAuG4wPwdL4AGI54HzfhHI+Kqe32UYCGvGC1ErpGBi6xiGvn4SD8W8Y0QvOPy8sg
8TJoukQaMpAP3mElQEvKSkyxlJRWJykZacDhfBCMgxfjAT64ylK1EjSA1eTtOBo+UtoTlF335vlomj5evI6YAUxlM/LAIh9D00g9NqDSfsDBtLAyiQ3pkw25685vgDKc4haTNhwyRh9NEZzkRcs68tVNx7yRhLtFoKyhK8Z4IeCI+90n/xVEykoJs4mJetri3gfYToAilpy4dKUw2ijKhwdmlKeX4UIV6i6HOHGZF3cjMY04UjxulozHVh0zUKVN8dexgSSd0UloC06O3PGUYz5dSUA2SovOMKEabE8uW0nSk8YspTmdKv5rq6qYgzanL1NhQSCpVZzulmkOfeshXZnSqRG3fS0n60U2GVI+ehGlTLZnVDIaVq0JNallludWgjpWVa+0ZJd/6QWmu0yA7dGc69YpNFQ4lmyAcp2AHS9jCGpZJd6VIPHW4WCHuFZ6PJWFjtznZh1D1lxSz5z6neBTNbnaJB7UoR/2JxYLahaCeK15cv1pVpl7VqavloE1XqiCf/xYVqGTsai9Zi9mOopWuMp3lT9ua27R69bLGkqhxd4tc6SgXuEMV7m2Ja0fdJpO3xaSuSq1rUuwu05Wu5alGmxvIqEpovLE16tKQetz04ra6y70ueS1mVfFiVbpaPatbX0tW/JrVt/u1L2z9y1b9Fhe6aiWwXFWJ4PYq+Gtz5a+Logm5xBIkr5DtazV7AlhpHvbDIA6xiGto4YFgWLKRZWyKHVtiAJxYxS1u8X9JS+MFefazSdQnjjnbU++i1DoCPe3ntBja0UJUpNqdLXdZ6mOXGhi+DWaue5N8VNrGp8nDffJ249vd+SbtuRLG5ZS1rGQuM9nLUQOzgPtryN6CN//Ca4brg2Vb5SXXFsvTJXOdzXxnNIvKvNrpsZ+5BugVoXfO6r0Le6WM6PduOcryHTOADxzm4LY5u3per5X1g+f8ThrKlY7upb+70PpKdcCj/jGVNW1ny9qVwiIEoYY1HOMR2/rWuM41lGq9Ysr22py/Zmew8VpZYcd40AV2zI1xrOMdg1a1jU41ZoKcFtSCDtrSzrIk38zgUCc423n+9KO97WBwe5rbzYyzmKOdaUVvmkOdnnGpw3tqNv9TtEuFc73lbG55X9TU571vv5ONbrGSm9EDX3C6973uhEO42+q29L2N/G96BxzVE0dyu/u1YYUJWtIF/+3BI81ucZcZ0l3/Bvm89X1xe2/7yFAFeKAP7XA6s5rPV0a2X93dak7rvOM8A7rXnvnqoldY1vW5ZtKLTWxdO/3pUM81r6cOa8dR3egTXvZnbXtumHfy5TKhtlm8WPIa4xusSVn0181O8bZrPO1DwYDQ5p6yoP8c2QaCV6wGNnSVn33tXp/IqJaVKbv73e1oZztL7qP3XxGt72UPPKbB/hLcNOBaldKA4SP/dzdL3iG4kRnm6851fyMe8J2XyGdKhvlKQb7mh387UuoSNiGVq1Snsl3sE/95VTOlLkWi16U0r7bdo/70npcJ7Yd0+0s9XvecR/7kFa+S+7S+8MWPvux772S4MwzzHjd+//KlT2rl+yRjsap79mGvfd6bvyesLxXH7y5+1Q8FAog61PNfn3H3k9/3lLd47xZu1Hd825d6oDeAXYeAAFiA4yeAPadtDjh93CeBNUFrVQdEGUhNVwc5UfeBIBiC4cRrBlCCJniCKJiCJagQKtiCLagQaoJ1VreBbrMaBsAAOJiDOriDPMgALOiCQGiCUqF1m1V6BEdIDNAuQbiECsEBplVtRlhky5GEILKEQagQE0AVYTM2yFJ/d0aFPiMBYjiGEqB+rmGFQIiFQxEBEuAjY5h7m8d+kASGO9F4pMJ3Z4iGLygAWdgT8LIxBzB/XnhldKgT14J9eaiHKaiGPHF5Jv8jABATfu13XYWIAZi3fz+oiCjIiDsheoB4AHgYhdi2HnQYf79CfImoiULIhzwRf59oL/wXgP7nVXSIftdCegKgipvIijuhAZfyiaYSixNYfiAFhg3TeoGYirrIiXYIjDUDfXLoR0lYe7ZnLqTyeJmoiwbAiaL3iLGCjfF2hBSVhEYifJYCi4mhjavYhwFgivJijpaCfaLYY9NIJM1nKc+XjcvIizrRjeRijZfCd/OoUYXYehGjjKrIiYY4enHYfwa4W4Voh/IXdOq4gvw4KeDXkLL4kJR4fstihvqYkBepE+LyK3C4fg75gBDpE6ZoKc/oNRW5jSMZAMdIKugojBVASIBIeH8Q0JM+CQF4iJCaqJDV8pM+GZTQmJIUOI5KWJFEGXEM2H2hVIjNEZNPyXA5uYDF2JTqeJUtt5EqeREBAQAh+QQACgAAACzJABcBqgDbAIT+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2ef+AADI2dsAenpKQIlHNn7R5eWJJld5K2S2Fzaysb/DEyxmMnMsAAAwKFyrAADaCxqmHULSDiGqqba+vMW70tIAAAAAAAAAAAAI/wABCBxIsKBBgQIEHFy4MCHDhwQdQoSYkECAixgzatwYQOJEhhU5OhhJciQDjB8fekxpcCXLiApfthRgkaPNjS5lAgip0QGCn0B/QkCpE2ZRo0cRxkzK86bTjkuPNsUYtCqCBxeTKtW6M2rRnDKnPuUI9qVYCFaDXsiqtSxLtynhfhQ7VqPciVMnpK3qIADXuxS96gSskmbdm4RBGsYoYW9Vv20Fh5VslvLbxYdxWo6L+YHjqhj+bp47Gm/pwDUz2z1d2GKFhLBhU7CQVrTtyKJTq8aYuKHh2MAFULAq4TZT1opz716t/HVw4bSrMjAuFblv5ct5Wz841fNnBBggH/+n/nX7TN3Le3PHfLHxZ7bjccs/jn63+vMa9TruK746+cHmFUSXavcJyN5FaKW1Fnz+zdcgfdlph51GEFRoYYUTEBXfhg9WVx+BAcL04Vj/TVZiZSMeVqCI2Z14mYucpVjXigMNSKKD5cFImoxj0agUjzYZkJQBCQl5FJECGFkUkkrqhGQCC0QIVZMyPRmlfc9lqeWWXHbp5ZdghinmmGSWaeaZaKap5ppaUvkSk0MWGWeScxpwAAF45qnnnnsmUOSdfAbKp590cgWgjqaJdsCVyy2Q0KIROhoiooFR2pqUUAHplI8c5ogjgJoidiCWhnra6aGRhWqTjSpO+qmJr6L/iCmrM7p6Kqy3yiolrT3a2qGpv4I666gglopqri+mOqyqZPkK7LPHerjssMbiGqy15TGrmbbMVZssspxNmB2vT3F6bWWWKsZtRuRu6my00GIr7K7EZmZuvOjGetm62vF70b3wBizvZP5mSq234eqbMIT0FgzwwPmCu+O09CK88LnfStvwwRZXqjBp4qZXb6sdJ/qxyQxH2K6oR9bp8pFywjzyWAyszFHNhU7G5s489+zzz0AHLfTQaLrJEpwyG50S0kvG3HTOTjpd8nrpXoebnQdkrfXWXHPtENZdh931wxFjfHF5kGYnadqNkp0xvm/Pq/LMvU5dWNVUK7txxXZb/y0xyhrP7fC7EMddeIwUz92332aDrLfgHC+O1N8epzwu3eUSXjbcZ8t9+eCSz4S36I
9/HnnoXY1uYMhYgo56jbZyKjvm7rr+euonVx646XzfPrvqLO6t+O24U3536SLb/vrvuV8q/OXEF9844NkmDj3xzBuvrvUiR5/99KhxTyr2sZfPOojKo/495xM/3/2SLz+t9EdMR027TTj7i/P8hRHt//8ADKAAB0jAL/EPIvWrktQUCDUGHvAhCXzTAiW3PoGhC2xiy+ABvqbBDrqtc4dz3HHYtpu1MaqEHxQh+I5nueSdLnQVDKFpBpc+GJqvedtz3/h8d0Pt+YaGL6RgD/9XmEPI9W55Q2TfDMVXLPIBD3bns9f9VqU5w23OgvtiohS9l0QsIk6HTeThE7eCvNYFcXExvCLBtEgyJ+KQcdUD4xbd6MO8tdCMR1RfF2UYPjm2UYxvtOPuXJhHG45RenE04vAAWUfS3RF9Z+xbGq2YRT/WiouHnB0QC7m0+NnvgQyJ4NFstpH8SWl/5SmgKlfJyla68pUTHGUDJThLWYJyIaJcWix1WUtJ7lGNF+ygBjkoTLGlkHpeVCHaTqgaE0bqmLpTYjQ9R8hFIjGT5dukNfWIzSi2qoZC7GYZIclJNP6Skl9U5PUYSUQ4UhOP2zRkIB05SHiu85rzXN04pQj/TnOK85H8jKTdJglCXanzfeyUJgvrSc54hjOfk2NoQMvpy39K9JsCnRpBlfnOht6TmxCF4j4xStGBnrOglTzoDvHZSH0ClKQO9WdIyfjSWvWzojNFZEcnGtOJ3PIguaTfLoXaS6Lqj5QaQSWAYMnUpjr1qVDl0k8NElSfDtWqRcXqVAtSVQRetWQbRaaJMFjMrRGzrF6rIkrXytHJkLCZj2JmZiSFyZxqko2XpGM7BZlI3vXUpBbtazU/Ks+WRlSw9kQoS/dKT8R6VLEgNaxIa9qrmwLWrtnEa930qtDkjNSmGQXrSdtqUL8S9qGSpelFQVtSjY5WrKUdLGQLy1iX/662sqHtWFinuUZLbjahyYRtSk07W9TW9rA7helpZZpanfZWpWFcbGfd+VzirjSyx53sbTNnWdcGNrms/SsEPelA8tLyqFMs5Vd9E9X2uve98PXfVgnSVQiuN5T3xWV+gbpfqvbXWLtdaI7IitYNKoTAaIWmgIPL28q8da5xfaZaSYtOCqfTutHF7nT5Cl7ctla0362ubK9L2w03tsPcza3FAuxZyqb4w7p9bYOHO+IMl5jBCxZxYklsXBPb1rE8XS5Om3tX32aurkTOrJHdheTsqhbIyi0uc53s3Nju2MY9xnGLt1s7FSOMxdS18mN5PGUfI1fHY8ZymbUcZhpfef+OwOVjjsUcZCkPmcpFhi6cpctmDqO5zmT2qXltOWheohe98+1KfBfN6EY72kuJFkh98ZtVr1bavpemdKQBMGmgFjitIQZmhYULkQcfhq6YDfWoFyKj11SAZamOtWE/hIEKwaZC/GGXjOe8al6zlSH18clPYAOUk+ha1b+ecbIPUp+gEFso3Uoysi38EPQIe9gJCUqGJCRrPO/aILphQFWe/ZMMHLvbZtaumXWTIKCQ+ye5BrOfRb3sLb8kNfpxdrYf869vt7ne/6Y2sDGCAau8+ydYgcq0SS1wZVcbI+KuCgWEUxVz93vhDme4xlmdkWv/xAKzqcq2FY7uPp9YzuD/1sh3EDAUbku75CgvCHq845hoexvjvn64RtyTllxfHOb0bnjOB56fmts83U82+Y+VDoAPPaAkJBm5y28O9F6nHMZDz7jWs871q4t3410PONjtzZLujl3sWyd7zAmC6EITddOd5m+m9Tt3Az367njPu3vhnpAC+P3vgA+84P3e98EbfvAJSYDbtbp4CH7arAI4vOQBX/jJS74iCC6rvE/+EVPXRVKWn3zlQ494AWxArqfe/NJTMjjSH370rqe8ACJwE1czueoQ+RAEJMD73kvA5/2OfemFL/iE0H4ju4dN7xN+7pfrJNiOMbZ2iB942BPf+BsR9rs1g3OvU8UxLZ8+//X/bn3hYz8jEUfAu8PTfKo/XyMF/wzzoTJ+8ke+/gU4P0bafXDpk9z5MhFuK7cgwYd/5Rd7+hcA+aZ++wYUEtB+SFdlE8FuK4cA8XZ/9XeArpeAGaBvAmAVzKd6Z/YRqUFzKyd+GYiB46d/HseAH1gVBCiC6lZ2F+EcwRFyVZFwGkh6Oxh6+tdu2PaCIvdzAHhv/ZYlwxEUD1iAKYh/+Td7GLGACDBxSRgU4SeDScd6NYiE0QEUI9eDlgeGogeF+xcUINeFxTZ1Ebgi6HEB3xF+9GeAKkh9CRgAK+dzWCiBuacRJpgWq+GEYnh5ZIgRLWgVR8d0M6iFO7cffyiHgHA4iBjhhnsxf//nfgG4EVIIFASIgis4h9cHiRfRhw7Ifbinc/nxAKiYig8gdUzYiY94fHyoiqnof0RoiUZIL4/4il6GdmHHdsOSi3IIi0IGcPM2gb/oiMG4i8VIjAOBXsCYghpwSv9FX9M4EHFHEAEBACH5BAAKAAAALJgAFgHbANwAhP7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5/4AAMjZ2wB6ekpAiUg2ftHl5Y0lVXorZLUYN7Kxv8ITLWcyciwAADAoXKsAANgMHKccQdEOIp8fSKqptr68xbvS0gAAAAAAAAj/AAMIHEiwoMEABAQIOMjwoEIAECNKnEgR4sOKGCtezMjRooCOIBUSaEhy4EaQGEU2rCCgQsmXBhMuhMnwJEqKNm9KzKnTY0+NAkbSdPjx50SVBh04UChAqQMGQ0nKjGqQZ0+rOrHe1IoSKVWTRY1G9DrQAQIETM8igPD14NS2ArmGDCsWgNyOdzmS/Zo3494AatOqfQCX4Fu4fVPSFZsYaN2xQQsHaIwzMkEIgRWqRXBBssDDbSkfXWxU9E7SP/9GNQ1ZqMAJmwWrdeAZNF/UV3Fn1b2Vd1fLiH3PdR1AQmzNmxHUViiZtc/HdoXjla4XeGjqfoE/SC5bLQbJtqk6/48OnTz08aqHogd+gTvy5OCZF15fnv5567frAze7mYIACskhIEF8MwWn34H36UdcfvcRl5wFFFgQIFSFhbcadooh+Nh6C4qHIVDEbRdgct8R2NyHlWlYF4cnKliQcSOq5RlC8hmY4I0bojhahxe6SBBsMSJA23IFXqciYzqe5qONOXaI2YidzWiheklCdmRpVXrEI5VLXgbBl2BCMMGMNBbJYI5XppZldFvSxCKZQ03p5pr2odlgi3fCSZOcMNW5Ip2AttlnoHruWaOROP6ZZm6CvvRmoSQtkMChtxnwmAEKWVoXpgJoKhannhrFaQILzNfppQqRehtTrLbq6quwxv8q66y01mrrrbjmquuuvPbq66/ABivssMQWy2oCBCSr7LLMMnvAqM1GG+2kp26aKarVfnqttZ0eIO23ylJrgLfgfiuuWKW2tcABCqX71QJ+Irnobge4SxW8AtQLF751mWjqvL0B/FujJaU3Z78VUuqhwHMxPB3BJBk8KMJw8ekooA5X1+VtEDdkmr9MKpqovHkG13FNSYKMqJ0si9zkiScTRXFbFheM8chYbuxhzFWlnLCZC+OsZ
sbZ8VyQxBfP/FXNEd/cMskvm2o0QR//jOfTOQvNKMxX/6TymS6HDfWfU4NVdlw+V6xwj1rvRrRiZ0+GX9BGfU232FljrWbcSNv/rDRVTHvsNN5Dl3wd32nTvDaXbQfc+MBc/yuW3WzrndvbIEYeck+UM265248Pp/nKXlst+eeOow651F1zbvrmeRN+ueEct65T5weHPh3mlSGOeF0UKi5A8FQxsK222Yp6vPLJ/8Tp2cbPPVT0oe5m7PXYZ6/99tx37/334Icva/U9gcot+TqZfz626N+kPvLto/R+TwfUb//9+OdvqUL59+8/u4MbmwBzNq7/GRCAACjgAf1nGntFZV3t2le8YjdANelLXfxz4FD4NbnXka6Cs1Od6Fh3utKpDWiVkx3oRPiw0YHNdSe0XeFYqDHa7UyGKMHdxHRXQxoWzYV3g6Hw/3C4QhWmjmxATKEQl7a43PkwQzz8IQlhdxMdJi2KUHxi5qb4wdt58IUUDOMMkchFMFbxi0EUYwiNuDqTEbEjVvQbFh0zx94l0XMmHGIJQVhEPvbGd2/kSBybxrvRFPI0gNyjF2OoyDH68Tc6u9DvOshIKq7xkQ2zoSQDmZFBCu6QVqrjju7oxDwyEYV4xOTuRIlIUu6wbmhUohr7OMs/uvKKsKxkFx1ZS0hqkkqTzKUeLUlLXhpza2VM402IBziFMHN6y3NeNMs3zfRV0319a0j0oHfN6Yjvm+AMpzjHSc5ymtNX8QPJ/KjZPHamsyPrdCf75rm+8vRylVpMESutpP/ABebvIf305/3Gw0Z8FtSgqqzOBd+VQQmu6aDZAeVz8jnKZMrSnr6EaET3OVEyurGRGEXoMYt5yXu20KKpDGkmNZpFltIxah8lpkpfmtCNUlRJv5xTMGe60pq21Kc0RVIiZcpTnLpUnzcNJUwPx8mimueohuToUz3KVJA61ahARSpUsSrUW8rxqjbNalST2tGuorSUYNWqSXu4VaVStXZWTatzJjjSI5o1prtMa1nrmtG27nVvXiWkXoPK156KlatYGmpe9TrXAK41rHetKlGv2liJTvWwrQnsJwer1sKK1LNsjSxcJ+vUykqVrsjEqxkHa1qyXvaxcNMsyjZFT+b/vZMj8bRmO3V724w8rzDbBG43q3PO4hr3uMhNrnKX21uM5FZ+w4VndHE7Xd9W17nXrchzobtbkAi0f/vL13f/6VjQQpaA48UfQNM70CRpkCYQFMB7YcJB16KWXvN9Cb4Weq+q6XK1JA2wXRMrW5lRcpiLHbB5f0rgs77SlM1EJVphS9gFb1G1ysxhLFNq4c52uKIYvqiG/5thAfcVs1oqcM/+FpXAzfa05U2tZBMsyA1P+MOIxXFmHYxLCLe4iQ9G8Wt1nGIef9XHcQJyj4V8X1saWbDCPGVTQ0thD8t4tDTupI2DXOWx+pVNKj5a4qQc1xN3OcdXvmGZtUxiEZvY/7Bn3nGIOTxiBANYwSUlMpifvNkoR3jK59Xzo2Z85xq3mc5v/mye07zJNWPEky+2b4x3o9hCs9nOJcZzoqkMWD5HGsmGkjCXBT1pJ8/5xih5ZpKHBxfq1da2r5am9GgS3FZn9zTLzbWud83rXvs6Vs3V7q0lsl11DpvYx45IsaXbXe4GmyLLxkgDpk3talv72h5hb3st22RIBnS869U2AsWigAGY+9zoTre6GxBBDJbazKTm7wMb6u66lFvd+MY3tQDNYFJHEpj8lsi9801wc+/b0Zxe9KaleOpRo2TgBc/3wUmb8IX3m9EARzhFIB5xdU88y4q2eIUxrtOAR4TjHf9H98ctHXJNu7yNhM40R1CecoMr+chMfvcIG75kndC85iuXOZz9ndNB7bQnP0950N388qEr3NQxZ/rMa67vm0P5y93eedQR3ZGkd3zpXIf308VO6TBTLUlejzjYUS1yK7cdxFtne9ep7nGr9xnrOj8pz3Huc7qne+0OfzuaBS/nuAd+7n4/N+B7TnRJV1roGEl7wRfP98bj/fFSz4jkCU75q+ec27MuucYnsnmJ2/3Tn4cx5sOu+cQr/vQGljToVy/3qbt+AJ2/e+odb3aw2FsBwA++8IdP/A4409bNNnbymf3siUTbuqF/Sa3b4mos/fr62M++9rd/veYje/nQ977/spMNkedjF/zhj7U8jSLuZ0Wn/Qj89nez3nJvtz/c2m7gvgCY35Lst/+RQn8V1xvytkH0xlBj9mcOxhIugXpx9lckZ3QmBxGQRhQ8ogFfwhRfMiRnB3r/JnoURxEV2DMdIiJo8R4rBmMfKIGjJxEjKGYdchwCoBZs0YEqWHQXc3SLhGmNVhYyuBlj4ns3uFRYxnIV8YJntyDuMYNqkQE2KHs4aDM6eEaHBoIB8CSZwYSzIYRQSIRqFoITgYRmMxBA8oPwgTYeGIVNM4V1RmYxhQEB0h1nQRhyk4Ze2IMgd4RbloMDwQAB4h8AshlRUodD+FZfmIciuIdSSBD8oRYQ/yIhQMiFeLeCfNiCESGGaKOEQXIWNYiGhShah2iEiViFLOiDm0g8AhhoEViJYOiCiriGBpEBQcKBntiFhoiHohiGryg4HVKGAZKCtgiKuAh5ekiKrEgQD+AUyhiETziJasiLEwgAmCg3bEh4EFh2nhZ7oAYTLmaB0ehWOUd7h2doPJhxreh2Tad3hsd4qfYzqgYT0/cV1Qc/6qdb3BR9JTGPucF9/NiP/viP3Cd+5Ud+5ids6Hd+AplA5KeQB2mQCVmQKtVazhiMDzhkrJUlqXhx1miRG0lQKJaRI9eReTeAYCWRuzeRKMlZ10h26UiSTieSluVXIImOLFmTL9mSPP9lkhU5k152kirJkTZZfzcZlC4plEZZVDppeT65kw/1kSOpijD5iSqZlGM3lFZ5lFCJkzNFlVFJkUo5lRj5lBqplSFJljRJWWFphynJlD85eESZlW85lnFZlnMZUlxplj3JllLJWXdZl3n5lYB5kXtZlVgpl1dZlEiZloPZlWvJl4rplYSJmHB5mDn5mI0ZmZNZmHTpW/XobJ3pmfVEj6EJa9YHkKZ5mqiZmuH0kAS5kJxSALAZm7I5m7QJmwpRm7iJm6nymcrHmg0ZkeIlbuuRm8Qpm7dZnMUpEvInUDzpliUpXw6lH8iZnAIwncSpEBwAgA1RX5fJWKLGjrthnbn/eZziSZsKEQEr0RKep5dtGR0HgQESEJ/yKQG0WIt/Up61SZ74GZvneRAQIAFMIZ902IxL6ZgGYYITImbSuZ/GWZ0Myp8CgJ4FwR/dAYzdKVdAs4kDQqBY8qAQ6qG2GaEF4Ydn0R0lIokFepETuokIMKCEmCMgWgD6yaD9eRlZmByoKJabWVpFQqKbOIgvep8gOqP7WaOvYYZqsaH2maLeaaMsKiQo2qFD6qAeaqQBIIs3mhwD2pzgCJZk+KQysqRSWqVU+qBG2ogl+h5qMYhcupLPKRAs8SoREiB0OJxTGqNGioVpqoWRGKRMiqFoAyuB2IRRqiZ4WqY0KqJH2h//1hEgndimQPmmARCnrjKnfeqnY2qmiFqkiioQevqICSqm
gdmkBNEem9iJmGqodzqlEjoQT0qLkOqRWFIQCDoiObqgZHqorSoQaDoiCrqYgFoQepoc9WmnucqqBmGqI+KiqYqZhsmjBuGjv/irMLqqZLqrAlGrSWqhfyqpBTEBDxCu4voAzFiouXGounoQ4zquzxSrTTmrljga6Iqs8eqclLmV31l552qtmoqtxJiZkomv3+gR83qtAxup90ps2PKO0ueaRIqfD1ueCrEBwvWbzuewFgsRAQEAIfkEAAoAAAAsswAWAcAA2ACE/v7+RzyKAAAAAICA0M3cOzs76ejp29nn/gAAyNnbAHp6SkCJSDZ+0eXleStkjiVUsxg4srG/whMtZzJyLAAAMChcqwAA2gsbpB1D0g4hqqm2vrzFu9LSAAAAAAAAAAAACP8AAwgcSLBgQQECAChcyLChQ4UIH0qUGHGixYUVL1pESMCgx48BMmqkKKAjyJMoP4oc6XAlS4YuX0JMKLNlyZQnY8rk6PGBz58+GeDMSbMmzKJGZyY9uhTjzaEedb7kWfABgqtYr0KAqhJpUqkswY4Uq5EqV4JkL5odmLUtAgdnD3o1mnbj3Jp1J66Nm5ekyYEQ3Ga9EBft3Z2HpyYOu3js08IhG5d9HGCC4LYPIEduqpRz34efbf4tHLrhWgmX22ouzdSzZLWv7Y7mG1vvYwep22KAzNopZwC9OzfdezY48JsVECpXTsGCYN61Sf42Tp0y7ek3l2sXQMGtBOjYw7v/xj67eHTQyLcjbO5WKOnzNsUPh2/aunnyBHHnRrAb/Pj/8+G3Gn1HzYZabpptBuBS1Qno33yzWZZaZgPKxyCBvrlWHlfVlReYYIQl2OCCX2E404ZQdegRBCy2yOIECSoYIIl0mXgcikOpGGOKNo44I4Qi9mjfjiD5eKGFJeKIk45EpmRkiUjWqKSTQk7ZpEAIGdCUAVlu2eVSXAqgJZgIJbDAgGMmFaaZtKnn5ptwxinnnHTWaeedeOap55589unnn4AG+icBhBZq6KGGJvClmosaFWaajjZa06MHIGqppYqKWemlnBKaKaQvFbbAkzVGiZcAB5wpKkKpQjYqgQ8e/0njqQ6+ZyVRS8UK5ayI1UrbrUXC+p6pvfKqGLBdIRuVsNcZyxixxwaprEGs6Vqqs475at60cuU6LLaTQcsYt4aRO1C13/4oq7pJShukt82ye628p5qL5ZA8wnsfuLCJ65i9kQGMbrzrFrwrkAMKzOy+9BbbcLQJv5uUtbTya5e2HOKb48IZ+xuuxbYprLC+HYMsncnoiSyxURQ7bPC8F6pc4cTpvlzxw+O6OzPLNR9ss8vtRrxzTS0r5nG/Gupc9EVLZ4tyfEkL3fREhTEgqUyPeimm1qBiTRxUVmucUthdOybo2WinrfbabLft9ttw58no1mTSPXfZLGVdN94j6f99d1IHBC744IQTTurNPxttQOGMNx7R4o1HLvjAXL169MVP19fqqqiqGpflNBPs8+gwBw3dyKEznPizmReodM+hwo446bNL+broseNe++5A2y617CNNjTTOTiN8+spEA2906xkabyvyMgmPOfEfO/8r9LmrTnvvvEN8/NDRK8869cPHfLv22ZdM/vSr/3u++snrzv38y1u/LfYsSW/b5ftH/b3+DQEgevh3MvtlDHU8k1/92lc98/1OgUwTX/EYWD7TPQ986csX84RDQdm8T4MJRF/3xtfBkH1wYyQDYQkLuMKUnXBJHFPh9hY4w5w9UITBiwvZuMbDvWnNXmED4tX/zBa3IhrxiEhMohKXaMS/+fCJToxipOw2Rb79ZoAbPA4BBwg5yUXucV6UnHFGOMEalrFGm/scqzx3FtBdkYRmbGAcYSMzAW4RauvrnwF5hMA3yrF0gCTjZOooQT82r4V4RGR9CAlBQx5yjuyDpAlvCD9HslCSlwwkYhiJQ0s+UpM0BKUN/1dIRx6OfnD0HSkbaclThlKQdHwhlTwZSVGeEZYepKQMadmaPGYSl5NcZSdp6cpUAtMvskTJGFF5S2b+sV7JxBUvsehLairSdbpE4TQTiUlrdlM00QzWNrlpy2c6M5bZhOE463NHdmbxa3zE3zSL2cxXbjKcXVlnL6/5/8ljujCds9QnAPxWxR5KcVLwxEkQNbPDGjHxoRCNqEQnSlG1HdRrVERoRjFqxYsQVKMdtchHWRJGxwmpnfuEUhdLariErJSlk4shTtzITw5+U3NsrNwaXUW5Spazgj/NpTB9mr9S1tKf5IQmQJUpU3W+86T+u6AdFTLVlAZVjw4c6i6LykqgItWde9xYH8PX1aOe06zek6pRHVLVfp4Vqxa8Hga5Osx6GvOtyFyqNENIVLx686p51ao2+bpVvybVsIvE57JSONiaavGpnOyrRtpqU8D+VamCdSphG3tTq34Vm5kN6GY161h6DlKx1GqqaEsL1bDCcKwZ5KxlD2vPtP/KtaqUfWw1aTtKtZaVrWv9JWI9e0+9ijN1kr1rbZXb29sGlyFVG2LepNs36mpkpC8JkxDFhpKGnqqi4A2veMdLXiReNLvW9Wh6RbreiWB3uhtFb3w1AtOWDrS+9n1qeF5aXzDiN6aMTcmr0thG03oVMQTWaed4qlqmKik5FTjubsGaVd/WlWoWDsADWKQcFsGlW/qNqnN/G0AL6wcBysFKalHqVtveb645HHEALoCVFGsFxBMmrosPKM/J+tYqNUZIVmB0rtayNrKF9fGI22Ljq3ynyCyuLGYznOQIOvdDQRZAWz4sI9ZiLJ4wVvKLK+OWJqv4XlHWbYVlnFyL/A//y1l2y4cNjNbmjvm5C/kfA9xCAe60JURd7myL7czjMFv5zhhoiwXYkxUu0xmuqqSybOlaaLbsZytQDrFrqQRbSoN5IEBOjXvQrOk135nE0LVwBlLTn0zneNDuM24+kfvpgUjoOTj2sohPfWGJDBUoQCGyYdLMJEmTlqy8brM5lxvr0DI1wJzuMXPt2mxjrxbZlcYzhQV9ItTmGttDWShkvFtQKE5xu9sNKXrKy+52u/vd8FbPeeGrbom8t7rzxXe9H3Lv67bXIf8N3KOFy+xw8Rem/g14T4dCU26r2eE75VyCodLw2B57ttvGOGit/WxaT3q4sF42oWuN6jxrW8fT/xZ5tdlcZTefPOQHLjg6nb3X+PW6ztRW+WllvViPX/yzMMf5zmkuYZsrO+YpR/rKk91yDJc840CXcnGJPuvRXlvjQYf0lFn+cTEfXehK1/nMOV5zcHc96WBPezDJXnSz/xzkUpe5UNledaM3Pex4V3tg6d5zq3f81XFH+9q5/nav313vl4X7jbw9bJ9fPeoPx3q3eb5ix/9d14BfPOW/bfHHK37g/6R63+1+9pznXetT53vl/Q4ScoPUoOWeG7oZ+u/6xPv2uM+97o84b33D/vXmBn7vPQl63poe8cYXuyGLD/XPG9nhkGc+yo+Pern7UfpZJ7jgtU/9N2I/8N1PvP/1ud/K50s+8tE3/za/j37nE9tG6X+//LMY/1JDXP3zxP/4xb99/od//pnHfuxXfwGof/2XfKZkgP/XfPuHgMh
nf+cngApYfgCIeRa4ThJYgfcnUOCnfNV3gAwIgtNHgRBIgBc4ThlYgu7HgSlYgBqIghN4euQng/7ngVfUb+qVb/6mgzm4bw6Bg+zFg0Hog06xe0Z4hEiYhHcyfDlYAE74hFAYhVLohAgxhVZohWXyexylhSMRcAdQHVcYhlBYhWIohhxxcCzVgqQnYGBYhmFIhm6IhQKwATlFcWrodhDWdjUSh1cIh3wYhQgRAdK2gCPoawSBARIgAcqRiBTSePP/8YdT6IeQSIUCIIgGAQGJmImJyGWudoJ0dWJNNmqk9oiTOIYCUIqmaIkEEWpusXqeGGMBkBVN9mSdWCKo+ISSOImBWBB7xmqcZ4JkxYpmxolteIu5CIm7SBBwJhiieIex04txhhWAFmh7eIsFcIx/mIwCcWupQYvUCIyxs4xmhgCNWIyoiI18qI0BsGr78Ra1GIGwwo1XMY4I8I6nYo3XeIrWqI2smBuA5oz5k2hMJmRbNooXgo/oGIe7mBzqwWhtQWQAmUN85mdZkQH2iBgIqY/GWImR4Sbd0RaY9o0ryDP9uGjOMWQXqRgZmZGCyJDb4ZBYMWoRqWQ0lhshKZIqcLmPGnmOHEkQ7diIOCmC2ZdqAXBil9GMQrKSOqmKAtGPreiIryhmy5gVQBmUjqGUG8mUAlGTgsGJVmmDNWiIAgGNTwmVtqiTLGkQRpkV3miQG4hcE+AAcjmXDiBsKckYWMmTWikQdEmXouiW8LgUAQEAIfkEAAoAAAAsmAAUAd4A3gCE/v7+RzyKAAAAAICA0M3cOzs76ejp29nn/gAAyNnbAHp6SkCJSDZ9jCVV0eXldyxmtBg3srG/whMtLAAAox1E2wsaMChcZjJzqwAAqqm2vrzFu9LS0w4hAAAAAAAAAAAACP8AExAYSLCgQYMHDAgQIPCgw4MJFhoAQLGixYsYFQqYiLFjR40cPYqkCHKkSZAHHqokGHFjypUqW4Y0+XFBgJs4c+rUueDAQps7g+5csJCmyaJGRSJN2nEp04sLDwAVSjUAUQFSq1a9+rSjVq0EFn6t6rQrxbJm0XZV+3QhgbFU3cIVypbp3KBhBdzdWTdpX6N/aQY+KuDt3pxydzZYzHgxA74CzF48nDMvZZyDR2ZWGllyxc0eE18WjbMBgtOoT0uA7Lni5ZuWX4Nu2rn1bIy3oRZ+HYD0zdTAETzQmdsi79ija3su/lm5ZOZnd8uWjpNC8NQViDs3e1zs9NbRwQP/gD5++1rqlEkzuA68AWLzT7vr/Q6evH34TH3vJS2BPfD34snH233iEVgfeoeJ9oB/wEGAGX5JCUifbRACVqFgFxJmWHJvWbDQhx9OgMF1Dwb4GnLpZaiZipwVyGJoCO63G4g0CjBBcKvdRJ6Eybno44EubphihzUuJGJwF5QIHo8p/kihk8vFeBdpCzKIgINKtsZkgi/SBuVzXeIm5Vy+9cegdiZehiKXX6YVpm5BDojgev65B+CSJ3rXI5B8PnmgkFwCGoB112WHJp5q6tlkn1G2eZ6gU0oJwaSUTjrcoVrmOd+efnba6J9yQkrmm8ZpKqejbZHaXJzTiQrXjqZO//ipp2CyyuGpiFK2poyo5qdqeKC2imumlC3QkpwzdVWSZ8tK1qxZGiUwFZfSTpfsUxoVqe223Hbr7bfghivuuOSWa+656Kar7rrstuvuu+42BJNDCS0k77wGydTas8pKtK+/zEr0Er75CkwwRAA7O+1dPf10GVeM1hqxm1kVG9XCc0FMrK6Ksjnxeb36NearI48Fa6KbLkqrmyFb6KrJJX91Mscpe7wyyB+n+rLMMWs182G7RtoyhkNrGOqwnm3Ja86+Fr3izj73TNavACgtNNMiO80Z1FNzHRfVVo+qNYxj0+Y1XVJ/nSbNSEt8c6q2Dtk2d7FyOuvdbkd5dlD6Xf+9MdAdL/1201i7fLSskoVNctliMq7b3pBBjmnSdauMN8uFYyj5e5tnSTnKc+M8eNbB3oo43aCfLvrlq4PZuY5po7024DULzjrcmRstrOrxVW7z7YSPbvjudifuu+15J485ha/3FjvfYB/vN/CkC6/54cWjzjbvwVNvYdyBhv7UYyiTfxgDCUObfr8b/dt+wM/rhH78Oc1/LVPZwqv//vz37///AAygAAdIwHDdLyn8wt/6sLVABb7PWQ1EYASNkkAHHpCCB8igBjfIwQ5OJCodDKEIDWQ9wvjIACJM4QaLgkIVqpA8GINLwwQQw7FozHtEy92KKnaYq/BwLzc0Xur/ste65eFQd6YjYu+GaDnlFfGJOsNeE7VHO/F1z4m4Y54UfyfE7SnxikbEoq+a1zex5aqK3KveEVcEPhmRMXpM5GIY5wjFMW4ReVTcS9DMWEI26nBrd5xeF9H4RTWK0ZBueuMbZ6fHwAmSjllcIyCJN8WuKA5mjrMICfUWSD5+zouVjOQhv1c6uaXRKJfkWSZX9UcYKdKKEZKeJ0eZwz5OMomhtIssF9dKL/VSTK88JU1SGbVVAsuWruwkL//WyNo9so6IhObwcClHS+4Sk7+EUzY1ST/OwRKV11TlNlmJTLMpE5vMvMselylJso0zOsEspFHMRzt63sV+7rsgTSoo/8EHqq+bN5nf6/AJvwIa9KAITahCF8rQhrpLnyeZYET9yT6IjoSfFJToRC0qEozuU6MXNeYmaWlCILXQhSOMzElR2kHyiJRqI4Wkr37IsIs9zKXvjKk0a8lJSlZTPKLcaUnL2bhShk+YQPVjO31JVG1q0ad4TCoYgypUpS4VmOcUp1RlSsqmctOY5YlnLrdqVa529apO7Sk1o0rWoZK0rGblqeuyWsy2UnWqd8WrXkUm1p/a1Z1o/eo7y9PGSC3yr9Hc61nf2iKjuvGbiCUnYwEbWMnOFarPjGxRK3tMzhLWsYaFrGaho9O8JtaOmJ2lZpk6Wda2drNqNaU8V0tamP/aFqxlJNlhV9tYz5ZWsddLLTt5C9u4uvW1ab3sWjNL3M/69raDzS3Mdtvc5Fa1t88t7Kio66x8wo+jHvHoRr0Lv4FKVysEhaBD18ve9rr3vfCNb5HA+xGQhte+96UoA/VrQfKql74Zwe9HWCrCD2KFwCFkIYJbCl2vrmqlC/aJSiO8QqrV8CszvLBWfKjhrfz2tCKjacZsajE4gvKoQvGQBdQWXdv2la26jONjFUMBCnyoxpe6k4M7q1zZjhWcMg6tTqqEgA+hhjUt1q5uRTsSYnZtJxVAjZFPgyXPIVewT12uaq0Z5O3qxDRSXkhqkmRl48L1UXSdGiPV6Ugv5wT/OFNWjY6zC1o3z9YkTv6akCAAZzGnJscfXmwi08zidM5lndPFyQWCE+cjww63Lia07M7YTOzxuc8CCM6lAi3XQQsXnZ8kpGzplJoJ2Ag4hnIepJWcaKQ2OZxRExKhUIOBI/25zNelbI9R/ONhwvrJb7ZSjh6d5Dov2dUiyTPaAAVm/9CT08f1tJaHm0c2O9POOIkyeygwuStbVto+9mssu3zsnCyaPUjesXNjy2txA/nEM/5yYxiTY1ybVtBo/rRWQ11pfd
dV3fd5MXPHDW8h3/nM9w7utEE9yH4vfN/e5jG42w3jpNjT2heHi0Bfk95/Avgi4r3oeauy8ct0nH3y/025ylfO8pa7XFsft0jI8xvzisyc5v71eM4rui8H+PznQA+60KND4QqvOskQXrCCiy5h8ShgAFCPutSnTnUHOKzERwc4VjpMFQ7flGpPp7rYxX4sZOvazNjddbx7bZKwj/3tUC/7wdOO9rPn++H/9ozb4T52ubMd4cCN9t3DXXGj7J3vVPe7uzuda9dOfO2Lbzvi3674wjM+4YKPor/VDJ7DTz7qlR84vjEPeL5KGnpO/zzZ20ztxhe37ubcfKH1rvrEs57hsH+96x93+nTTvvZSD/2WSU/33XNT4MNniudVL/zWE9/uz4893jnfmuV/vvm4N/63tQ/P3nfbLNafPP/2IZ5763I/rN6fs2TCj/jx5z3i64b/yIE9d4+wn+/upz6dtY585ysf+MF3e+R3ftD2NOlnb/8HgHEngO9Xflj2XP2XfU9xf3CXf7Mnfw3GbpBneZKngANggZOmdZAWgQM4gR74gQyofxg4ggdIbNWnADAYgzI4gzS4AQuRcWNxcvtVcyQhYAHGX/1kXgAVADroQC93hEiYhEq4hPrDgwBwcz/ohFAYhTu3g1XYX8zCdAlRHlrYdMUmgkmHYEtXdDD0MBLGdUIRRA64fdEnJiImQyTWQz/jcH9XfG1ofpo3fRdYbYeWgntIgBmodgZXhx6hbKj3hRDYgqpGadbGZHb/GHilN02EJ3q+Rm4SCImPCGKSSHGUiGe/9od3+IAYSIINyGUFl3yaeHmYmEyyF4L81ohm53iAyGo8w118CBeIVoKrKIuh2BykqIK3OBa5WIq9KHGA+IugGGOn6H+pmHm7iFWteIiGhot+6Ior2GLIaI3B+BXDCIyziI2KOIewWH+8+Ix4iFp6qI2mKGqRF4mq2IwGGI2+13DjSIjlCI+ZuIkb2ImvZom6iI/QZ47HF44mxo4c6I7OCJDSN4mo+G4GyY/5+I6jh44MyYzzpCk4+BVF2E9S6IMg55EyN39CUXKUsZEZxYQomZIquZIo2ZFA+FEvCZMuOZNXyJE1SUEu/9KFBRiRCTmRCql71XV1chiIa2iMxRh/RQlUt6diyfiT5+iUopiUOEUhQUEBk/Ihk2In6ieVRPmN6uYiO0FkcWZPOxmQUMmGAomWxBU42lZkfnYl3+eV+zeX1dUcX5YajVZvZXmPPtmXEvmXo1UzmIYdWymX1yiCdakbOXFpYZZpqaGVewmUaWmUk4mUdakn54aXb+loi4iYnkmXiakns9aYmuaCoGmYXJmYiokTwWFqN0KYpnmYp3mUUxklOdFsp1FrI3JrsZmaiCibqtkxbckeVdaZs1mZkfmUvNUxRMYez9aVtAmdyEk1YKkTZnId3FaY0Zl1x7mWtUNqwTGPvv/5mcAZms70APO2GGSmndPJneV5mddmkYCJkPTJk/VplssZn5d4lpTJn5a5nappl7Eomf6ZnFF5fi6ZkegFkjbHoD0Yk+P1XTcpk+7DkhZ6oRiaoQpFk//lPgXwoSAaoiI6oh+6ECR6oidqLxMaoR3KLC/lIigaoyFqojIqo24RhgRmoGqZny1WozYqAD4aowuhAWgYFGqImvDZo0GaokC6pCS6EBHgiPg5n4hVWzDqpCNKo1g6owIQpTsBARIQpmIqAfXWm0galL9yH1sqolq6pgUApYrhH+J5ps1lpQfipiDapmsKp/XDINmJgAVKnTllW3haok2Kp3yKE4zpnOz/GagBuqM9iSGF+qaH6qaJGgCZ6R/DZpzviaZKiqiVuqddmhMcYCWnAWjS6Z8veqeFqqdbmqi4ySCppqP9yaNaN6muiqVw6iFFYmvAQWa0+p91mqaE2qqh+qqjyi2vmRpVFqy1GXFqaqy4Oqq8WiO+ihrm46yCumPRCqrT6qWsaSVayanjGaB2SiHT+q13eSaN6peRGpifaqnHqqujqhPDCRxlqq2PWqvvuiLpaqzgihPNmRqbaqYA+qjnGiX/CqoBK7AP8LAQ+wAXp6/7mrBgsrDy2rAHaZ8c+1cW6yYYK6oaC5FT2q9VSqxX6q0AK6V8SaXhtaId5aBPmKtOSrNLDroQGcBxMjuFHwmhFxUQACH5BAAKAAAALJgAFwHFANsAhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ58jZ2/4AAAB6ekpAif6LAEY2ftHl5XcsZY0lVLKxv7YXNaMeRMITLYdYWbRrN9sLGiwAADAoXGQzdKsAAG5NbcNxLCwYAFtFe6tdAKRkRNx9GXVQaKqptr68xbvS0tMOIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAMIHEiwoMEABAQIOMjwoEIAECNKnEgR4sOKGCtezMjRooCOIBUSaEhyoMiDFVKqTPmhpMuCCRe+ZLgRJMaaNinizClxJ0+PI2c6FBCUYAUGSJMi7SD0ZcymBn3+lMqTak6rNk9CJah14AKlYBmM2NrwKVmBWEN+/KlzLduebt8CPYuWaMEQYZWKoGvQ7Nm0HQFzFJyR8E27dLsG+JAXbAW+BP2SNawxrlzKbeXCLToZscAOjcEugCxQ8lbME1HD1RxRdWvPnYtyCA3WAmmECvm69siad+/dABSftptBoXHjHkDkHQ3ZNFTg0C2/jc55+Mjj2AV4CMuUtPOm0XsH/5fOlrpu4tkVJg/L4fZ3oeF/k586vyps6wRn02Zg233uxPVdFWBWA6olnnDP3Qcabcx5999fBQYW4WATFlbhYdUlWB1joT12G24yQXjghZWNOGKG4N0XAF557fUhiOeZKJ+MrCGYYoYLWKDjjjq29+J7M8VXI4mZyYcifCq+WBKQLwmpmZOXJYnkkUoyxKRLUE5HZGpbbnYelVX29WBnND7Z5WsnfhnmSwsgMOZwBrBmgEJxajanAHXKdWeeb92JQIPD/QlgeoQWauihiCaq6KKMNuroo5BGKumklFZq6aWYpocAAZx26umnnx7gJ6ikkuomnnLSmSqqdqraKp4HlP8qa6enGhDrrLIKStYCBygEaFMLZFnemb4NecCvQgUrwLFnIdvUlSUJS1+ZUYKJpZRrwvRmgtRq2e2w1kaLbbYDQUuStPZ9S1+4545LLowAqpvujEaq+W5Z26Yor4D7EshuQzbeW1q+SPZrIL1D/kuTu+SaCzCx4xksYZoAKlylwwtLTKHGFlocFcPZYjwUx4eRXJnHBQUssMgfm1zkkBRDiPKHLKcMMbr81luxwFYSHOTNQLvM5cwmgbxmzVwFjbCZMcfG80FIFy30aktXa+/TkfncpNIwV60l0XWB3ZzW106NptfgXo31wCGSifa0b9sntsr3Rl2X2cUyrbPMa2f/3fZweEcct4BzG81QA3TFhDhZDbiqp+N9Qs7WnquC3bjhBl2e6eacd+7556CHLvropDvK5+SS80T5q6f/tPrjrMLeuuqpq37A7bjnr
vvucSq0++/A98q13l3DbGvwyAsPwPHJA+8sm8I/75KygeNMILO7+i59Sdu7ZHcA1h9cvJmFi83X9+FPPLi/aq+N/vBRVo/5x+YnTra48stfft/l3t9u/utTy/74By8RBVB947Pazgj4PgAm8Gvtw1oDD7gxCnYsgk+b4AOHpT8M8kyDxAth/PbmNAb672EOFCEEF8g/EI5wg3BLmAdXdsKMWbBkNzzZDOtWw5Hl8GUqTBsL//vmQm/9kEtNw48J/8atI1INhnLb4buKyEEnnk2GQ3RfD1tmxby9kHxSbNgWbZbCLyqQb3RZHFkUd5bLzS4nr4tc7OT4RpvcyXJ0K4nmSsfHPvrxj4AMpCD9WEeQxNF1tYNjIu24SEM2siOHROQcJ4e3hzCvebtLXwWNh0ngWbKTv6vksrrXEOp1UXDGIiVDlIW9rSxAlGAqTgZQeErzZBEqsDwIBHZpnF0+gItQzBkW0XiWXBbkAQlIpnGSmQBgBjGGYLxlU4xJkAswc5nJnAAZawm0ARazi8KBADMTgM1kqhF88DPiMEv4zWAKsCDjJKdCxkmBpJVxhcQkCzUDMP+BeJYzmb+8GzeTqCG+UFMD8ZSnAOJ5AakNlIRKbOczCTcQCST0nwAVqDvFF818buWgCcWAADDAUIduFIEdZac+wQkbi45zAxjYQDwDis57CtGjuGRpdRKa0HqadKLCTGlEV3rSCxJEnDxlpgbs+dB1DvWjOjWINZOqTaYW1UIEvZFBo3rMpDZzm1fFoVMLSpd9DgQCE0irWidA05+aUZ1CJatE37quGDU1rlotK1cNGNYSQVSuRAXqO+N11zOqdCuFhOQjM3LHNC6WsY/FSCRpNz+CXA6Pg8ysZjfL2c56NnSJ5chkGTlJSYYWsqWl7GklG9mKjNaOoOTdeGKbyXT/VrGvmbkkbT9JW9yp8iC88lWzNIlVbrYSKqz8rUGUC7UxWlWwHKVrXaX5Qee6Fa7SnddY89pC62oUuijNLkWpS0Mm6quw2MXnYSXo3Zqi97Z4nRIBC+g23CKRm94kYnuJK1bwUii/WjRvwd4LTcM+lb0C/plNC6zeA2dwv7ZlMHwNDNgA29W+T/SvUXH64ARvbcHaje/P6ncWKkr4xCGmMHf16+GyETjFDa4wgi+s4f6Kl33k5WGL8ffioKpYvkuk8Y2jm96brrfDQi4yin0c4xVbmLAYvmKNdZhjl5wTKmxkXGsp8lpHplaRXyYtZjH72TKb+cxoTrMgV+vaLUuk/8uKDbOX2cxlN7/ZzhGBs2gdwOc++/nPgPZIb3PHX79GmTe6jS1vB62AATj60ZCOtKQdINzsRRjGE9bScYGlvWY1WtKgBvWpkpzpUi8ZxxwWyqdDzWpHjxrKUwbikCUEYKisutWhfjVfY33fQ4+n1k25Na4lrev68jrDs/5vGEki7GFDutiA6zGBsgrkszTb2a6GMIiZbGQHqxrbota2tIncbRkHG9zEFrevC90WYH8b3c9W97GlnOwNH/nd8M72jv837vA2udpkuTa2od3EdV96vKmeicCdTfDzGryDVS7Jwofd8AE/HL/LbsjEcV1xBfd7kyLemtg23uqOf/jjxf/dLsC3QnJWm9zFFze4uxWe70e/nMcxn/dcIs7smuub1Kcmd9BpnXGGtDzX8q63jZUcRZ5rXAFQj7rUp051EyjkykJx46roPBE9o5brd67sQC7rWDWb/exoT7vaEQX2PON5eW+Hu5zj3HaIeJ21cxdt3BMNyt4ta9C3Y3evdR4xvndy0b1lLkGCKwDFe0XwyGa6gDadrE7v6nxJl/y0Me70MJkY00MH+Y9HPN/Pczv0KQ/5tUi8xsybGvSwR/i9ywv02G9e5kW/mOtRv/TXy97btIe10g1N+DyKi/VbMf3ti9/N3CtJ+UK3ffR/b+7q7puWOR9+u53/o91L39++R/X/7HVc+9N/X/T/Jn2Qha/56S9/9CIvvffNT//3pz/+69+19ge/fy91XvfXZ0PZ135E93/PN3/2F37uN1gJR37sp4Dgx3tU1oBThIALGIHnZ2/A9xJYJxRZthVaxzpbN4KvMmZlt3YomIIquIJrRoJ05IIvKIIyKDtyMiKAF3gHl4ASKGsEKB4U4ngCYUoDCIHoR4Q+eBjlp4MZ2Hs7yH9HSB8HMQESMIVUKAEQ4Ez9R289mHpZeITbgkxJdWWQp4VGyIRL+IRwIVVeJQFgxXzbpoRoSCEFgVRe1VZj6EVbaIb1F4eVAU9elUwNdV1lSHxdiEqENyIE4VJ/mABX+F2F/3iHhliIiCgQCLWIzCSITRh5g8iDmxiH/1Ec6RFTCRVQkFiKOciHbVEXhEJS4xSI7jWEmUiGseiFMgGK2SGK40RTpviGF4iKr1EQU7WGz/WIp4iBe+iLv0gQYOhV57SLKKeHyJiGBUEBXlVVmHiGnDiLkZiHqLgtDeBVWMiN2YiNTtiJaJgvD7BL6rhLS9WGxMiLxhiNyah/4liO2giJk0iP5iiL5KiJ2piPxvaOz0iIfbIqHciBe5eQefd1MIg6C4l3LBiREjmRFCkpdSd3dXd3bSYABdCRHvmRIBmSHakQIlmSJakQCNCQplWDLDUiJvmSH0mSMAmTImF4mISPLf8pHzNJkxy5kyapECUAhAEghIeYk0Pikz/Zk0gZkgoRAUnYi8i4G9GxlCIpk1QZkwLglAYhARTQlV5JAW3liPV4jsQylVeJlWeJlVp5VODojmP5hFIJNGnpkVaZlk1ZEN9YjW65j7TITXM5kkppl1lZEIoYhsP4ln1pcH9ZAHV5lnc5EJX4hz4llnzpg3Hpkn/ZmFf5mAJxApaYALpYjNF4mTqZmYHpmIMpEHS4iK6Ik4dmlnOpmVR5l7aIHbgYT+3omjoHm4K5mHdpKKw4TmxImf9olGbim6e5mYNZm8dxm8ykRrrZhbyJmsi5lgJhiY1InP0Il2Upl6ZZnXO4iHthWZyv6Z2xmZyzmZrVVIfjuZ2JyXzICZ5dxVOTeY3HOJrdiZnnKZ/H9AD++Z8PgHXRiZjTqZz8GZCIaZn5WZr7aZrWGW2+Jkr62ZsO+pTxaEgqCWYXeSfxaZokUHYbuncBAQAh+QQACgAAACyYABcBwgDXAIX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2efI2dv+AAAAenpKQIn+iwBGNn7R5eWPJFN3LGaysb/CEyyIWFm3FzWjHkTbCxosAAAwKFxjM3WzazirAADFcituTW0sGABbRXvbfBmqXQCkZER1UGiqqba+vMW70tLTDiEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wADCBxIsKDBAAQECDjI8KBCABAjSpxIEeLDihgrXszI0aKAjiAVEmhIcqDIgxNSqkz5oaRLgQkXvmS4ESTGmjYp4swpcSdPjyNnOhQQlOAEBkiTIuUglGTMpgZ9/pTKk2pOqzZPQiWodeACpWAZjNhq8ClZgVhDfvypcy3bnm7f
Aj2LlmhBEWGVgqA70OzZtB0BcxSckfBNu3S7BviQF+wEvggV8jWsMa5cym3lwi1KVjGHxmAX8PXb2fJbzBNRw9UcUfHWrh1Ag9UwWnJi02xVt8Y9lXdVxH+Bf1YawkOIvLVlBmftkTkA3c2Zu4ba9ahSDwI8hGVKl/Rr31fBZ/8Vr9b59KaKQShVKCBsh+STyQeWP5h+YfuHOX/nHDsp+9mQeUcdfpWZR2Bm0gFXmn7DMfCfUqLBd5uBFCZooH4D6scYUg8i9ViAti1n4YisQfecgvsZhJeDCiW1F2SRKVdahSUemJqNm8WH4QIaaMBejxq8B6OA6OG4G42amXieUEsGwB6MBRHJpJHR1YjkZShm2NCTUPYV4owkJknliRfq2BAGAmDQpZcyfnflaWMqmWWRGK550AIIfPmdAawZoBCfmvkpAKByCUroW4IiEGFpin7H3qOQRirppJRWaumlmGaq6aacdurpp6CGKuqopJZqqqUIEKDqqqy22uoBibr/KquseQ7a55+32hoorrsOesCswK5aqwG/BivrolAtcIBCyDa1gJxv5hanAAc0K9Sz1FprZ1l6Dhhtb9/+VueU425LkJQzQRsmlmXeVq65MHVbZLjh0Tveuy41CW+M8dlb3rpw4luSvvCi+5K6VgIsrcAkEWyuwflO6+987QbHsJ0QDyyxwuAmeDFNc+4bb5vecvzbxPV9PJTKUGbc8MYJxywmywU5vK3LW8IsJsr30cxVyCLjDDLPhxFdmc8mAb2v0EMZjaDMWCJdl9RnMR2VzuyaHB7VNmMs75RO3xh2ju6KzJDVNWMN59i7ca10wV+nq7a0bM9Vttnckjyv1uPV/32i21SThfbPfiM8s5l4Rxn3wXN3DHXAiEPVQHcKTU5WA7wWmjmim7NlaK5SY/42yKeWbvrpqKeu+uqst+66qId63jlPn/ca+0+1a66r7rfTPrtNBwQv/PDEF8+nQsUnr/yyjZ/Md3nELi898wBEPz3x2r6kLLNnYVt48+FV2z3y2S+9eMTffw944orrDfbzFMOfcuTs8zuh/PepTz/7gyedPv752V/i+leX/z1uYQLEGwGdBL6+AfBoCTTbAg2XtQP2Zn31G1m/Hvi0nXksgkE7n8YM6MEaYTCDE2zgvywoLhCaz31yI2EFD3c3FIrwZTJc2wdrWL8U5pBuO7RYBv81eD8W1ouDNzphD2+Ysx86joZCHKIPkbgaKrbNhXCDIeOc6DwTYvFhTByaFatUQiguaIj2E5ERHbhGtShRKJYTXOXOIrre5SR3nNtdHu1oE0GFrmtpe50gB0nIQhrykIhEJB9Bgkfc/e6Oj+xjJBk5yY400pF6dE4X2WW96xWPgjq0Uic9ObyHjNKTdSPf+FQYvxqJjyzYemVTUjk6HI6RTEE841ZoWacH+PKXvoTA1bi4tS8ehJcGgUAClslMZg7zlnJ640yQWRALNPOaFUgbMe9lTINQcyAPuKY448jAbbqxmwX5pkDEKU4JEA6a05LmS9RZAXaKU5gFhGfFdAn/lW9mwJ7itID/9JnLFO1yjFpBk6QusIF75rONrTSjQfuJUMRU6gLXFOhDyzjDqKGTINRUaKQYek18lpOgXuShUNQZAIAy050DhWh99jnRWVZUP+F0aQbeKdP8FVRLB+0pBKsJ0GzylKOhlChQKSrUtmBImfZ8ZlM76FGVTvOmB3lABbbK1QqYNKZIBWJKo8jUsLawiGbdZFXJatOpkk2NaT3iWPnZ1rieE60dTepa6SqURVqykhnxI13qmCu/cuSSvqslQ0Tns0Q69rGQjaxkJ+s6wwYWsBhBLCQziUnLZhazn+VsYj07EVIm73jUMu0nWTlTfZ7StKZUbfDKV5Lt/wmAtiTxHkrtqhZZJkuVNtQi+nabVwRaVYFhbBpx9Qq540owuVLlbUSLe8GPtgy62lyuWJVKJzRO0a1VBO8VnRtC4Y5Qu0/ca035h92jUletzDUuW9lrXluKl4zvLSZ5X7jB++LSv4BcmXfbC9b8slG689tvFvuLYJ8CWLEgC9xWvtvgosFTns+tbxPRC1/58nWABN6ogVdY4aHOF8QaFuOD9adgMKZYuSu+sHWHFOKTxvjBGC4vg0c83fhWt8U3qzEot8vjBJ/4JeSESkySDEfQVkSzkhTtZklLEcHSMcBJo6yWt8zlLns5kVSeCJT/KuUoh1kiYybzmSOS5sM6Wf8iDoiznOdM5zp7RLbEG3J6fXyy15IytrJVwAAGTehCG/rQDuAeLPXc4T1Ly7fOAm5TBH3oSle6Vjvms1xL7NQZB4DSlg71oDGNV00fuMg98zSoRW1pUsMV1RbGsapZLWpXg+nGnE7irGndaiGz1sG5fuuHX7JqXhfa1m7i8KbVu9RJG/vSvjZnjz283pkU+9kDQHbJcA1rEw/bJdd+trb3xm1T3/XI4Ma2ocf9vnITubnoLkm4jc3uGCr71PD+trzVfexo35vEzO7uVubN63pv8d/T/nG8SUJwWht8uO52tML1zXB+E/rh5414o8+68IY0nNUYt2+wjyRjIBvk47X/9rfGl53vahPb4qNW+cjxa+755LjiMA/5hleOb2o3WygKCLrQh070oivABHO83JvFvHQ0N53NT4eIlZUO4Z99+epYz7rWt26pNUs96m2+bJkpOXY1F/bsb8Gz8FCrduExmuXvppufPQlo1eK2Iba9O0N0y3OAwwnS15I0fTMdd4lvXL8ddzHhDQ93xnPT5GuicLepWnMjU3zBpS784Xs+cZdnePGb97vmEX95xWfe8aJHvc09LaFXVx7Yk9c15Lsk+dfHeuZ/Yz3lXhzd2IcX91hOm4SVLHPfk1zWs78u77Pb94RzvPRBXr57bV+gkic++qBvfOidT3rP6/j0228t//Kv7zXpF5j6lB/948kf+eKjX2zWh375s8/58MO+5T9HMf1Tb//bB5xcA2Z+IvZ+v2d8dsN+BMFkQrFkV1Z2buaAYud11RN1E1h1BcFYpcF1GriBHNiBioR2ewSCIWg7Iig7EEgRbQcr/2WAK8iCb1d/HaF3d0I9zSd+wPdrE5EcCGeDLoiDEpFVFBCEQkgBD5A3O3h/6sd/2tcRyQRQO3Uu0PGCSgiDVDiFGWEQ1mRPMMUmR+h/BHh8wVYQOQVQXxUTXVh9NYiENlEQLrVMGjUyZ5h+qseDBEgQFNCGy1SEcJiGXpiE3FeFGDEQ/4SHy8SFfIiGNzg2AiFSkEJS1/+kh2Z4iHLYf4joe3UxKRjVTG8YiYkYh/AHXouIiQ3VTCbFiT0obXTohxVBVC61hTHiiQX4hTSnihRREFAFUORkirLYgrsohX94hQVxh/ZkVIbYiZL4iWF4gQBlhMcYi7TYh3NYi8kETL/0hFBIJb6YitEIjZQYETrYjGB4ilT0jcZYjuL4FuR4jubYiziigDMRE14XdqElgfL4ZBRYj1X2dB64j/zYj/4IOyU4WrlSAARZkAZ5kAhJkAqRkAzJkAqBAAEpkJr0jJMYHg15kQa5kBiJkSIxd9eTjRPpjN14IxvJkQJQkhepECU
ggwbBd7g3c3KCkg2pkTKJkAoRAfv/94shiYzsWJMJSZM+WZA3eRAUIAFGeZQS8FUDSJEhGYXTEpQHCZRQOZRiuIzM95IGGJNQKZQnuZUKKQA4mYAuRYxLuY076ZQG4pVfqZYFQJUDIYy4OH1MOZFomSBsKZVB6ZYBMIitKJdm2ZTY+JRqiZc+qZcnQIgmBZI7yYt/SZKD2ZVe6ZZj2IZvqJhnGZhp+Zh3CZYBwIiP4ojX9ISWCZjwtJmbiZMXJU4UcH5zqUl1WSOm+Zg46ZnsAZrMFEejSZeYaZeaKZts2IZ6WJYj6Zq7CZu9GZmcSRCTyU5XyYIwKZjIeZpYSIbNuYvPmZnR6Zu2qIW9Z51ZCZ1bSZg1EKmXAwEB5nme5slkuQkAAQEAIfkEAAoAAAAsfwAXAd4A2wCF/v7+RzyKAAAAAICA0M3cOzs76ejp29nnyNnb/QAAAHp6SkCJ/osARTZ+0eXljSVUsrG/eitjthc2xBIraDFxjVpVoR5FLAAAMChcsGo6qwAA0g4hZ0pyx3MpLBgApmVDW0V7eFFlql0A2wsa2Hodqqm2vrzFu9LS4H4XAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLCgQYECBARYyLChw4cBCCSESBFiwoMYMV7MyHHgxo4cP4LUKIBAxZMME5pEeXJkSIUsK0qEGdOiAJcZReIkqHMnwps+C6qsSXEo0YdBhdI8ynAmU4c9fUbdORVnVZdGny7MqjUpz6VPnWrdCtQrgKsj0YJU25HrU7dMzf4cu1DsWLYv5Z4t6xVvzpJ0A8A9qnciXbta/ZIszDep4oODiUauyTgw4reNgz42uFkp45V3AdOtfNhwaMao5Xb+Cjqx6LGkx15mutpjZqm3qea2+tp166extc4+WvtnarPF9/6m3Ru4arBMh0vejZV6Wutrsbdtznw54eeWTbv/Pt5Xe97n3qenpwy+NHTi5v+Sdxyf5PqYk2MGDyse83zN9UEWIGfcwXcfS/tF1x9z/+HWoG4HopQfgu3JtiB8D/KWYXURnjQhSgkeJV1NyZU4oGfoBfZhSxUKd+F0G14XY3YdVrRiRSESNSJ+J34143afqVggUQaYZcCLMS2AAJI8FunVkQI4mRSUUgZFpZEJIbAAXVluGVpCYIYp5phklmnmmWimqeaabLbp5ptwxinnnHTWaeedeOapZ5lV+gQlAgQEKuighBJ6wJ+FJproklFi2eiTCfW505WQRnmAopgKyqgBl2aKKQKMefnUAgckJCpTC5j443nICXDAqUel/+oqrEQtkGNNO7Kk6nOr/lVjUUOy12p478HI67GtBsllgRU062yzICDVIn/Fktijbb0uluKyy1XAwLfgftuBtMO6J2S2AqJL4K82/bZAuPAyEIJDt8aUq4TXGodsecqG9tsH8YaLAr3TKlgtj+qimOy2/jbEQcDwVtBQvSzd62G+eyXsI8O+NdQBxPCKSjFKFtuI8a4Lt8ruQ1mFADK8GTA08kklA6sxtvvStzJUr30crggeiBCwlzPLxKSuJyd98087N5SVt+F6IIAH8Y67UNEU1WzT0hnnDGDTKfVGQrhgxsuBzAWLeDS+XKNcHthktfYwuGWH+8HEaeu49sVtK//NMWa/ZUB3QiHjXa6FByPdd99wC9YcCIMLEK7EhpdH7LleO5g5hEIuBzADdTNAArmWm7vs4pvz1vhgGWQAZusZnE3w4S4mznbq1THeuckHYw2R1iz7nTK/f3fHO4u0U4v58PTpzm1FGAiAwUmS4gRlA+5hr1UDkTpavUuUTtl9pXBzHyxL5u+p/vrst+/++/DHL//89Lf5/UjhWzm+/o+K3z//9wNJ/vy0PwAaiTEHSKACF8jABhYpIQ2MoARLJTziMQ9AAODUBDdIwQxycIOhGgupTCVCtzUPd9d5VQlnJULfPQR4PEPdBXGzuvNRKHkGW54Fd3hCle1uNHnD1d7/TCZDHn7th7AJor2GaDMUZsd5DQMiDtVmO745EUhXfEkNG+dCh8DQaRXsoRFpiMSuKLFiTNxaFuWzRvuU0TlT1FsVidjGdNVxXW+MyxlJlsbgFVGMOsvjd+IoxDk2cYa6gWLHpFg6xOkQkBi8o1K2GJguNuSLYftjJH34PEbS53Knk+TGEKk6QRLFkk3pYwxFiTNScsiUwmpk7R65yTEmsngGquQeaaZKMGpSc5yMYhIJuURDqtGVMmIl02Cpn10azZh+VGbXkEkjZt5QlsoLJTWxuE0tWhMlAezI9bI3FvOFkyMDnFQBCfg/dpbvRhBJX/3mSc962vOe+MynPvF0/86MpNN66wRoO9U5UIH2EyP/BF9ADXpAaULwgxM0YS0h6SANQrSBF7HoRRfYN1rVRFa/vGWrVKgVWZF0VH2jJTBtqSFcqkeX3fwLRB5A05rSNALRjKm2dCogSnpyogB6CAUSQNSiFjWnLM2dQ22ILy4u9SEbMKpUJbBKnhJIkYCDaVKv45AHSPWrFPDlUrFqvJ+u9JMN+epXJyBWqypsq9Xs5DDhup2GWECtX8UpWULa0mAucq4U1Q1DGoBXtWZyrEv1KWCBipuFRI9MF9DAVx+wV8S6lTXfRB5dX0KWMl3gq4e9bCs366vM4uipAXjsmCIrVcpWVrT6gq1yTEuRlP+mtbBEZWtoSbtT3vaUthCxLUO8ituw7jawffUtHuVqRtkuKKp4pWpblftW5L6SuXCk7leEWlikWjeZzmXqxZwa3pla4LzotYBep/vdJyYWuKRrb121qd3Ryteb2NVjeel7XzaGV7HNre9c+MtYkQp4tvk9ykEPklD8JUR7TzGn9yZcqQUbBErvFC9F5LnPDnv4wyAOsYhHHCYLF6TBAlyogwuqUBav2MQEQbE4Vfziho50owx8oKtwzECJnrXAGtIojzPKY44+x6NJoiCSWQJSyx4YgktGiUmjfBJbOTNrvTwukJX6X/jODps5JPCPx2zgQCb4lFf+XZZf++QwHvH/zLFEqyPFXOYtg/dtXq6cnGdJ5+T2t7dmFmaA95zN0zj5z7+FczOJiUZoVrXNZM2lWQVrOkOHN9IvnTRvQGlpSL9X0dckdJg7jeirflrQ2RU1FVVaZzL7+c2o1i+YV91nLnvar1nVdHU4PZ5D2zmusR7krOXI6le72taB/uugg1rpXl/61MpONbPnTOpfc7PNAJZ2Y5vtH18f+zrZlrWqiV3rO5e6urCOtrinzedqf9u9XQY1OB0FYWrV+ygSrjCF/QfjgWCYLubLMIkHTvCCG/zgCOfTvg3I74Wzs98CkTE6aZxiFwuQMQ7IuMY3zvGO/6TIC/Rxq0ceZJAnkMgm/2eMAgbA8pa7/OUwdwAJSypyY5O8OidFFZRb+JyVw/znP2dUsZFtbVbhWd6apY/Pgc50lgu93PC+9dGDjeZWLb3pQH+6u29u7qKXFumntTrWm651Z0v93JgFe217Pnaml73bz4431eMMoKu33eVvZ5
C3uQ5sdQtb6XcP+podx1eik1HtwWV74F+edwzt3ealRHx86754xg++5obn+3bC/XfKV77ljTdW3LGdZ7SJ/fNOv7yb333tZOd6sbix++JDb63Cd/3wc190eWQfeNoj7PGZj3zuQ+151PteccC/Peck/2XAo34Ax7/d6NFuG85XfffPh77qbR9115cV9rrh/f/do2/F6XvdjczXc/E/T346mp/1+B0+iBRv/O0nv/vpfr1WIJ7BhCjg/wAYgAI4gApwAg9WThQ3YxangPwncf4ETw8RcACXcBRYgRZ4gRjIPg2YgBO3gA/ogR+4gSCIUBwYgg43KQhkcjpmcie3eppndM0jZDiGciAXQiWlZCvEfa0HIDkXKztXUqhUF/b3fi/4dfKXdOxWaGZHfbFFeulnesNWSEOnfJAXfNd1hGEXhcU0hfgHf/41dX53feMmhVC3g14IaPn3fcu2bdS2hOdnR3IXhnTHhu3mhmcIh06IhWvHaHzkaOz1hqYWh/qnbZTWhnB3doA4SaV3NWn2QkP/iIh3uFx6mHh8yEt+qGWRiG6ZmHaTOHl0qISHyITTlIdyqHtjuIVlCIOJOEpgOIjr9omjZodFiIabWH2LGABBGBGPKIqYB263mIuYxGa86ILCV4rEB4u0tnVVSIVW6ItPyIiV+ExcaIaziIfeJ2ngt2ncpndEuIx954oKRm/ktD0lSIIjyGDliI7neGEQ6BASiIAZGI/yOI/0GI8ieI/6lo8Np48Mt49TwoI6tjS92IXVGIgHRmUVMULEyIwMSZDeSI0PyRHEIpAL2ZAQ2YwO6RIzJQEc2ZES4FqXNJCqWItNOIxcw114ZVypVJEXaZEjWZCaCJMEAVWFpVsrSZE6//iSEfmFSdFVuJUAIKmLLKmTGNmSGemSPBkUDvGTRBWSQ5mUJDmKJilNdsWUQHmTyiSSRImUtCiTAzFYVklU2jMTOHl/RnmWWwkSjuVZktVaQviUXbmTcVmUaYmWGdFZkAVab1mW3UiXUOmVArGWkNWWRbVeZJmVcDmXXGmNq/iVDQFdNemUfAmJgCmVjRmYDTFUxSWZiJmTfymXjOkTDyEBhSVdWAlbWvmZfqmYR6mWD9FdXpSaoVmZsjmboGkQEEEBNlVTKnmaAlabBjmVsjWRnWmWdXmcqjkSxImaiWmbq+mci4mb7jGZwkmZt1kQ90ZF+OiP/didD3eCDFUp9f84nuRZnuZ5T9vJnQQlggXQnu75nvAZn+2ZEPJZn/WZJeDZYg3omdCZHfb5n+9JnwAKoCohgxsFnHoRk9dpXwAyoAQqAA76nwlhAghJEU0mW/wZnA0aofcJoRwqnwkBAdOInAnan3Zpoi/xoSDqoSoaoAIgoqM5ATI6oxOwXphYmRmqoNXRovApoDw6ny/6EMRlWI8mijnKivTxo+7poz8aou6IWxZQpJeZoAjKoLihpECKpQXgpA1BmrhVb1VaopZJkiaipUzKo1y6EJqJWzZJeNJ0pFaqG2bKok0apAwBmbi1XmFaontaplh6pi2apkP6kyNwowsqpn2aNHM6pyLBqlpiwlpgJYxTCqcluaFKCqgq6qRn8llTZajPKaZxeqhjKqd/SqdoGqSOGiaQalQQtqdU2pw6eh2LWqowyhBMGZSuSqmjyhuzeql2Olw/KaVRqat+6quM+hAjUFg26qYYapysuR29Wqe1yhARgFdtKqnD6qwo+hfReqrTSq0REK7iGgHZmavaqqFXWqrHmorJCarPSqLoSqrGSqsj2q7uGq+iWqx1uq7K+Kknlp81pp4E1a2BKgAlMIHreGLpeGEBAQAh+QQACgAAACx/ADAB3gDCAIX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2efI2dv9AAAAenpKQIn+iwBGNn7R5eWNJVSysb96K2RnMXG1FzfEEiuRXFIsAACiHkUwKFyqAACnZUHSDiHLdSZbRXwsGABnSnKzazh8U2KaYEuqXQDbCxqqqba+vMW70tLVeh8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wABCBxIsKBBgQIEBFjIsKHDhwEIJIRIEWLCgxgxXszIceDGjhw/gtQogEDFkwwTmkR5cmRIhSwrSoQZ06IAlxlF4iSocyfCmz4LqqxJcSjRh0GF0jzKcCZThz19Rt05FWdVl0afLsyqNSnPpU+dat0K1CuAqyPRglTbketTt0zN/hy7UOxYti/lni3rFW/OknQDwD2qdyJdu1r9kizMN6nig4OJRq7JODDit42DPja4WSnjlXcB06182HBoxqjldv4KOrHosaTHXma62mNmqbep5rb62nXrp7G1zj5a+2dqs8X3/qbdG7hqsEyHS96NlXpa62uxt23OfDnh55ZNu/8+3ld73ufep6enDL40dOLm/5J3HJ/k+piTYwYPKx7zfM31QRYgZ9zBdx9L+0XXH3P/4dagbgeilB+C7cm2IHwP8pZhdRGeNCFKCR4lXU3JlTigZ+gF9mFLFQp34XQbXhdjdh1WtGJFBphlQEINuNejVg0klKNXOwowZFJFHhlUkjreCFGQBdYEZUJUVmnllVhmqeWWXHbp5ZdghinmmGSWaeaZaKap5ppstulmlkr6xCSRQtJpZJN32hnnTnMiWaeeOjJ2wKCEFmrooTkmdOiijB5g4ozboWZAo5QOetGklTbK2AJjLeCoAJxqtcCjz0H60gGhPjWqAKh2GiJRI+L/d+JXpsqXIl1OQvRqTbGyRCpys9r2mYoFVmDsscZ2gFSL/L0HY6nQAjssrtxVwMC12F7LwbLAhucsicEaF21504b22wLZpstACA7tGlOvEoa7V62L3WquQyCom267zCr4raz0ChgwgTUW1dsH+qYrQkPusgSvh/L+Sq69vjXEQcLpKrtQwyg9bGPEIA+sVME2gRYCxumCwBDHJ3lssMi0jksfyQ9ldXG2I3gwgr6hsizTi+DCLKzMANIM1WvWZuuBAB6ou+3G/YoINMBEO1g1hMS2dvO1VKqrcQA+U+SyTUKLKy3FmLWGMLZdZ6sBw1HDOrWvIV+tIdrd4ct2Qumm/wp23LzOHW/Z89rNYdYOdbC3ANlWwG+37hFLuMQzI+6QCFzzfS0K3JbnreSGyxg6jZY7pIEGVJ6uwQed0/c5tZPXDazRDU1GJYuQW/gv3bHHTntKUQo2ddgQjV2z7BOfPXvpD2EgAAYn7YlTkT+6KED1TEEpvUt9Lvmnn3mC/3sAUI4/5Zvop6/++uy37/778McvP5jbj9S9nN/jH773++tfP0j341P+/Beo0W0nU5SiHIB6hykEHupSDlxU7Px2lFX1zoCnoiBRVtUqUcUOdMpLnggrR63x/S2ErnvIA1bIwhVG4HgXRGHRmOccGeLmIRJIgA53uEMYYtBWNsRaCf8D80GHbICHSJzA0WI4whkOcTQ/JIlDHoDEKkqgdsijj+9oGJcoCsghVawiBbDIRC3izUBE9CKBGnKBMFbxhWQp4wLPq
J40BpE3DGmAG8MIPDlabXlPhI0alcKQNiIxAxbIQBUfEMdBxuyOhwtkVxxpmzxW0QICsEAV+0hJszURNybMVevmmLsAGHKHVEIiIxsJSdG1knSSrOEndQNGHqZyh2Pk5Csj1UnlcPE7u3zJFG2ZEB5eUZezvFsw/xLK4Omnlws6og5vmQAlkhGaWXTivaC4TCk6JIfTLKYOfdhNgWGzmSYs4hQvcAEqsROO1ywngbYYyy7Kk5Af+5YC/5j/zEhuU5D3/ArE9JlNfpoRkP+cZEArWRHnQe9l2KRnQp/yv44EcHo8Gov28FRRjlyUewMUoDNRUj66nG9+KE2pSlfK0pa69KVr6mhGPmq/kIK0fyKVKUZoCkCb3lSnB+GpRQUVQUMlilVFNdQ+dcPApBYKgk4d1KY69SkN1sSCEcVmB1WlKKvGZAHEe4jxlpjVhf4EnXb0XORgV9Z+Xget3FSr7kDo1uxItGJxTeFc2WrWwvVVlEdLJ+DeJTiI+ZGpdASXYEvZLLoetK68RCheASpX6zmWlJA9j2TTlla9WpavmQViaO3zS6KE1SFjjedozfnXkcZrsZVtLGgfS1vM/5JwovaMrb8ua1Db+haUpWUPY3c7298itrVwpaxnZXuattYWuPUEpm6ltrvBOde4vEmuQqcrt+oa9rq9hS5upbtc4ja3tXflbF4B9Lrzrnae5wzuM4dLXd4e970o2mze1nvDtbr3ufcFcHblyxKgGoR6PtKoT2uKU4w2+Kd4Mh9g8wjTClv4whjOsIY3DCeOehh8Bi6IUD264J4+mMEhJsiIZ8oYB7j4xTCOsYx/ElVCLVWZ+KVVA50K1agyRgEDCLKQh0zkIjsgIV5lCVbRq9Uko4SDTj4JWJ8D5CJb2coIKOzHDovj2052u/Sp8pXHHOQse3fL4A2wNr8syzCT+f/NZrZvl7FbnXKxObdufvOY41zc8KrZzwOOrmmprOc9axmiTEYugUFE6EJj+dBkS/OcxXtn8gJIzI4eMp//S2dX5tg22m3zpTNd5E2PR9J1TqysYJtnUgvZ1P5Btae9rF7ljtrVr4Y0OT/tSQH7s9KDBhamXQ1rBsnarqqmG6tvjesBFBtDx45seUKNZ2bj+tnPSjSvfSlo4ZZn2KTGdtCirdlpLxp33252mXVNVm37+q3nrsiP1e1sdqv23chWdLfnm251i5tq7u40LMcb7H43+9+8I7doab1fW+MG3JlGuHUDDuhf11orKR5IkRTA8Y57/OMgV8AJMgqkElvU5CT/PrGJMy6QIknYtRU5KYdnTvOa2/zmOLcSywGw4p2i3OcqP3nQU77zngf150AvOlGjetQaW6qgf466hnac1B47daqiqmqnbpzq1m6VKVB21WAdZm9k4lvaZ38JtS3d371yuuKzFvh21l7w8ta3z1Lv+rYnDLxlt/2zb8973OE+cGB72+7dlbPe077wNV9c1H9n7qkpLvjCP77akTfv5PcO9UAT/PDs9e/mGV8vfX+e34gP3JkRzfn0NhzMoXf76OVebtILiO6gz/zdAz/pyqPd8a+HPC1FH2vK997zhkd97AE/e8L/3vlqjzdFTtuQ1Jqd9o2HPjOlr6uxd6zsrGx9/3z3TSH6Jh7vx1889klLfkabX/WKH7zvaw98NPJ3+LIvvvhNn/zycxf+6Kd+2ld65tZ+0YMn2OMvCUgUGwUoDghiEWZSfEc+OVeBFniBGJiB6lN0SHd0Q5d0HwaBD8g/HPiBO+V0R1U2XCd/6ceCArgTUVYRnrKCzzd/2WeDBGh7BnESDvUzneeCQJhv+7dtEPEAEzABVHKEq1R9NEh/A8haQ6iDBYFDqCROCXBMTdGEN9iCQiiFj0SERlSFAoBLqKWFObh+UOiFQ9NXwySGPLSEEWGGaYiG8GV8L4gTteSGPMSEP9iFdJhfUfiHBMFGSERNOrSEM6GCfeiHT1iHYP9IPlVkiDr0I4nYSXI4h40IiGooEAxBRYVohTu0SpVoVpfoiJvoV48YAJeUSZtUF6X4haf4irAoiAPREJ64Q4ikSDwET6PIa7K4hoGYiYPYENK0R7nkiovohDiIictoirTIiQ0BTnuEhXGYjFt4h9gYhDXIhR3xEBOwR9aUhdZ4hsIIjLE4jhgBEXskVr+IiufIZdkIEhAhAS3EQtSIjIoIj9qojNzIEd6SjwpHjs2oiUHxj5aIjszYj9e4jxlhkKSIkM5Yjr32jACwgN1VgixndAfWgRtpgh6IkSHIPxo4kiRZkiapgSA5gjmFJwXQki75kjAZky2ZEDJZkzWZEAj/EJIERCT6uI3VYZNA+ZI0GZRBqRJUV1TtqBfu+IwmQpRFKQBOCZQJYQIxSBFLtm09yY8/GZU3CZVcKZMJAQHxx4hKqZACGY/b8ZVg6ZVqKZQCIJbeSAFyOZcUAE/XJ5FK2Y5N2ZZuyZduCZe2uI7thpUBGZG64ZcuOZSIGZYOoUd7dAGDeYpZuZDZgZgzyZZ+yZgN8Y17lADYk5STeZZbuZiYyZeauRDSaIz3RpGhmZCjmZml2ZanGQDF2JnwBJqFSZC4YZkFoJiwCZi32JkJQAJ3OZBlOZESuZekyZunyZm4mEhhdEy4aYdkyRvMGZtqeZqpmQCYpElJVJxmeZyzrJicIHOd1wmYAeCcCSCJngmeaCmehhmeunmYlumbpvmWebieoJgAcDidwSifSmGe9YmfgRlOY7iHq4mXrRmfr3mf5/kQJGCguxiZrJmb40mfyzmg6LkQESChOnSM7smQ8GmOTFmeA/qgDxEBEUAlKhoBC+if79haAkqaG4pF/wKjFFkcygmbKGoTNwqR8KmXJpqhNNpQz/NdfZWSIkiCLHmiA1oCEuiRHVl0AQEAIfkEAAoAAAAsfwAbAN4A1wGF/v7+RzyKAAAAAICA0M3cOzs76ejp29nnyNnbAHp6/YsA/QAASkCIRTZ+0eXljiVUsrG/nGFJeStkiFhYoWNFtBg3whMtzncjZkpyKwAAMChcLBgAZzJytGs3WkR7e1JjqgAAql0A0Q4i2wsaQSJPph1Dqqm2vrzFu9LSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLCgQYECBDAIwLChw4cPGRxIuBCiRYgMEh7cuDHhgYoXQwbIKOCjSJEkOaokmNDBgJcwY8qcmcAjyJMYV+pMiFMkAZ49L2rUqTIhgaBCBRxFCnEo0Y4CEMycSrWmUqYQn3IEirXhTwFdHTrVWtBo2IZmzwYYS3ZgQqlU48K0uvRsW4Ncw35Vu1bA3bJX1aY9y/buW7mI6fL9yxKs2r2C/TJGGJhw5bCF2x5GHFex2smU+UK2DBrAYMyXu2Ymu5nzVM92QeftOhp16dOqU2NdrbW1a5mww952fLZ27uF1USc/Ljvq79e4sQ4XPXu35MnRmWZHyvup7+cvg3f/nf64uvbrjLcHVd+zO9Hv4MVLb06duO3my63nPz8cLvi57OFEXnHmcYfeXwGe
lKBI7u3k3H8A6sbUgHoVuN6Bhklo4H4G9gdhhBwGRSFtFraHoWYaXhiiiR5+OIB8E9JXnn3MYZeiiSvi1OBK8D0HI1IjYmWcdcjxtWBIOxb14Ic/iigjgTQSiZ+RN+p4ImtLQthkT0EyNSR/U0aWo4JX9pblf1sK+GSFUYJp45gMVklmi0weeVGXSH3ZYZiWwYlkmd6dGZ+dFuEZlJ4XFikmlXRqSShEBkxmQIknMYAApWRGytikUd1kHQKenqfpX5wikMCpqKaq6qpWgRpZQrDG/yrrrLTWauutuOaq66689urrr8AGK+ywxBZr7LHIJqtsraPeVSoB0EYr7bTTHvAstdhSe6kAzbbFqQEHZCtutNuCO+645UqakLnnZltuuO1ii8BwoQYlEUVnpdScSWGRxG9X+tr4L1b+1tsTA4b2hCiLfCrHaMO5+XnnmiS2ueebVEpsUZJbyRmnxllRLCSmcSra58MYL/qZyF6S/KfJDkcGc8SLsZyny0nNrB/K6Xn8J8gPJYzTwlbqfB7QYgH6ns9JId2Q0CcRPSfEO8tM9dE121gfzwgyvbHXTSntoNMMPRq0zYfi/LXRG3KdIdl9wQ21T2qHzbaKbqMIt9kOzf8dktQlX9221SmfvLLWM+aN5d5gP8QxVIzLjbbCdTsuNo+NJ8343Thm3fPWhPccueJmjn7454mH3rXppAmO9+ldg9564THPLnrGnseeuu2r4676277HhjiUpAfK+n200wx7hrIjf7vKvAMPvXCTD1150pxbubnrnavVLVmcqmXp9Whxq26nkblq2fdalWqwieqjtuz89Ndv//3456///vz33yv7T7lWvKolwAGSa13nY5cBD8gteC0QWuna1LocgIAKWvCCGMzguhz4wHk1532VmohC8vW4g9iEhCUBIUpKiBcBuMRFVhkYUxBWvaiRr2yXU9LxatQz/zgqc0+rId3/LJYo7mmveEvzIZr4FsThsQmJYwue87qmxEEBkSF+uwjgXmZEMm3PRlX0EROxKMS/3bAv2fMiFDEXxt+k6SRZtMgWc9bFj61RSW10zRtFEkeIzHFtdfzZHTuWR87sMSR9fMgf7RbIpg0SKoVMzBgDkEiHLNJyabTj71AUSbkccmJOrNgjTXjFuI2yhZ3szCQr6ZUzshAwO5QSGF30olWWUYuuzGHHYummHtLyk4W6pRxzmUlBbhJLqayKLUM5MiIyLHlVix4nf7lM1BHvmKWTIg+pSM1SspIhl8ReI7/2RV/CsJq6u6Y0F6dNWZqzTt4Uph+JOc6wlZOb54wnM1vm/8yi1dNy98xQMqGjT2s+EZvGa2cv8QlPuAGQKOHL17ao9FCduC99KoxTRVdSKlZ5lFVvyeif/EfSkpr0pChNqUpXmqyNqqRUGYxpTB0AU5na1IIIlGADH8hABXYwp6TaIE8hKNShetBGCaClAxKSVHgOR4ZIKRgKnypSi0i1X8Np6oe2pdUl6hJyCr3Y8wwnvJ519T9czWcxHYnQpfESSM05K3jS6tR/au6UsAyrk5BKS7r+cK3kxGtj3rpXs/aVqWq1a/kCqje9cimuhxWAXN34ysE61p/QxNryUDTZ3/jVq4C1p2DdUkp0ZqizrvmsFUML0NFShrCP5auLVCvGr/+SEraYHWvtqCfbrSK2rpkd3DqzOb3xQHa2v/2rYnHIWHYWdz69hRBtKWvbFuJ2asF9XVm7hlrOTFePlSXtdQOX3e5t97SR7a4nw/vay2JXt8o7L2fTm9jyHrGtUXxujKKL1uSCdrmmxC/mxovI4/pWsvWFbzSnKD2y8tawyEUwcBWs2eEmVL9w5e9c/btaAE+SvaYpbUG5S98J9w7DRbSvGr13vo+6GFVcfbGLgeqskGI0Mi7lyEUtE7/csPTHQA6ykIdM5CLPKscbKeACrfWWoRIggkHdqVGLylMo15hbFLzpTancQXrlS4RVxQiIT9gvMgNszC7sJlTt9c0AhLP/fKy9q4Dx2E3J7fNm/XzviR28TYHWOXfM2x2DG4viZ77zh3Y2qCjnvEv3kvfQXk10Og9qYbc6mouzzKekA63OQTuXz+5kKKIBjaLm9ZnQoF6onzVNaiyZOtQN3u2pkfnnzbpa0LMmbqrFKupIt9pMr1Y1qmUN62my2tbAxnWxP01sYdP62PK9dadzfeFdpxjSVtx0qZXtbF03m9erbuivAxVscA87vp4200BpYtptT3vZ3kY3tZNY62gn+93drva3r93rbI97aeXmd6zlDe9ArRs47ZY2pdOtb4Lnm97QfvCkF13p/Frb0P2urbYVTnGGW7rQuc04dR16PvFNFMfn/+vxp8IcNiQfpKMy/qiNLWPkmtv85jjPuc6ZlXInMzkqTrayt7i8wHcFncZDl3KViW7Ao/aM5Q651wjLXF3ArPlgZiYYmq+Ok6sCrM1vxmGcF+vaEBMYlIpuZtk/LOKxM/ffDgo4xge+4HlbfN+xTTs/1y7i5sa77tDVO575fnZAqliT9iY3t83NbIcz/u8VljinF273AV+ajoc3ZuIBvniBnxvwD7+74ws7cbUzGqwg1zPdI2/cO6ctz4/eM95DvnrhSt7dlC/4xy9O+8+zPvCl3/vpb3t5w1PY9q0X/OsJX3xGZp6tm4975+fue+TrXvSgz7DyKQd7TB9fux7H/v/v97t963Uf8983b/gtn3o+ylOR9Hx+YIdv3eafzfXcZ377vS/70ec9+INHf3m1f+jXf9lHepPXcZWnQ/YnTvInWiymUyaXSy5nEDuGGionKikHdUmTgQaycyAYgiI4giQ4PxVYEN/Sc1NmPjrlU0XHdAMkdOAzQVq2ZUrXZc3hZVRHVSjEdZWCZi8ETz6IEsPRZmhWeM6HbRoHd5hjhFVnWQRofCIHXiOWIU7odgFWcWxUb7eHJVfoYW3XHAcXE8AUMjbyhQ/YWgLYGGMIIkyoJGiYfvelhXQWccnXNXFogOP3eAbHhXdohe/ngHK4YnRISH4IfICIf73XeAdIfc//Jm7IFih5WHvgt4CGaIeIiCKTWH2VeH1biInkh4eBCGdg6Hd9CIraJ4qKqHqcqH6WCEmHGIqJWH6syIh76HmPOGqRuDSbaIvWF3qfCInR14SjKHalyHdtGB4JZya9CHm/yIcQJ4xdyIzFiEbHuIaklYy1VIWaWI1H2ICkmGnS+IfduIqxR4mu6Il1OI5YcYIEkYISiHI65YEXQo8m4o4DAXMxB1LoQ3Ml+I8AGZACSYL4KBBKZkA/hwBHx4JR5oJNR4M1KFNIN4M3+IIV2XQ6CDBgNlX7woEMQRJBqFwC45EjkXUzBHbxN4hxoo1WgYXLqHj4Bo3iZyAs+Y1RaIa0/zhE+sdj3eSS3MhxpleIqMeTCaaHz4iAuKeA6tho6dOT1ziMSiJ3i+iM9eiUaShnUNkxUlmLVAk/VqmSmjeNMJl7wMiATVmU6DiHYsl5MYmLXWklNfmE4gWOZGSOZnR+UpiWZBKXPrlx90aWMsl+RGlieol4ayl9bemIb7mXX2mUnSiLSRmU62eWgymSYAl9h4k5W3mOrQi
XjVmYYUmOQCl8Qkl8Z0mYnUmImRmV0zeVDfcpn5mahimaf6mUZcmUlflfV0l2u4iYgOmWr3kefPmUq6mVrcmVwUmTsemLj5mKCSiZr2iaudlhu/l2ERhlE4iXLbeB6bOPHjWR7TNzGP9Ikn0xkOZ5nuiZnihVkAAAjw3ZggkUn/A5n++5KUWYg2eIn/m5nz1zn6XBn6oYoAI6iwNajv35nwBKoAZaoF6onweaoAhKjRAqoQ9aoQxKoRcaocQ4oZLooBnKix6qoBoKohxKohYqog1aoiY6oh2qoht6oguKohgqoywKFf75oQ5yozj6ojtao6SlozEapCkKo0Paoz4aYkBapDS6okuaoyF6pDb6pDMqpFOqpFTaolAKh1KKpUYapS7Ko02apWi2pVwaplr6pWeapR2TpFXapmV6pUwKp2o6pmi6pmQap1aap2r6o3cKpnLqp3r6poG6p3RKpG6Kp4fqpHXqo4X/2qWkxKaJCqiRCqWNaqZ2uqiP2qeSuqdIqqmXaqiCOqmf6qgsWql/mqakChiQGqoBJJ/1eWXs6Z6w6qqzSp9Xpp64mqu6uqvDEqtNNlQJuZC+ypC1+qqwykEW6ZAxmJEEs5E76KmjaqlQMYQh4XUEg5LaiUnQ6qWgunuzB0fVGHbWiKkttKre6n9qYpe4lK2CmKqNYa4z2ZxIOZoBWJrluq2ZWp1ZWJxQsZn8J6352q3xmo6QSa/Lh42UAa+beq6NmK45eZd8p7CoCpqYSZtjaZuBObGnGq2yGZqZaLD5h7CdSq6qSpwWy5a/qZisiqgrO7BqebK+ibHAybI0q6iX/zl/Wdmvx8mZg2qz7jqXN3l/D7uuEYuv96qvL4myMquyNbuwPkuxOMuvpOSvBQiwRyuwgvmt7qeuw8Su4Yi1GtuzWYuu4Mq18+S1xkiy72q0A8h7ZTu0XVu0asunSPuTtQmdS8mtP5uwJvuxd0uak8mxYhu2izmb7VhyEkWBtJp0wzqs9giX5Mmrkju5lEu5jutzBxmD4BlAmwtRnWtRn8tRMBgvRmdUzDpDznpmcsm3c8u6PUOtF2GtJxmuKbm3Iwu2uKm1BWa28Ie244q7emu1bau7aAeAB2uvJdu6t+uYBOuckQm40Xm1tmuTbru1cHu2cgu8Acu8L+u3F4u3t/8ZvBsrvszZvPP6t/UauOQrquubnPL6f88JvXm7vcK7tnXrl98rv+FLv+PLv+XbvQWLvservv7LvgXsvuYLv8+bvtGbvNorvVALgTk7tTv7r/0LwRc8vGRrvcYbsshrvw/swNyrmjCrmRVctRkMwtPbd29onInpmk1LuC07tg37th1sftm7wn3Zwjr7wsgZw4JrwPUXtH1Du75rqilMtzcrwVLbQlSbl0nsuiuMhEJ7wzZ0xKu7vPWrxCNsuEwxrNlJUYtLkY3LnTwWuZWbxmq8xv94ucCauaQbui8lxzpGx0lmxy83uu1SulV2ulGVulqXxUg8uB0Du1ZlklGFrTn/vMVSHMFqOMFOfMJQTMjtC8RDSbzBxLuWVLuMrMVRbHZ0SUlGvMifTL2YjJNWrJMiO8hCrME1zMHxy8Dzi8GUfMAMe4sKDLI4vMqC3Ms73Jsm7MM828oq3Mlsh79KC74ZG8QzLMO3fJS5LMAeTMC0TMxc7MhY2cSA8cRJqMNsC4XVu7vX27ukXMvVjMAA7Ly6fMXlbM2N3LEV673JrL/LXMlPW8osDMysKcwWbM4ibMz5DMnbLMndDNASm7sbLM6pDLG8fND2TMO47LALTbQN/c3XDM9RW8L7nLIwfM/+XMwYzcRdMawNMCMlHRYNgMcWqNIoyNLv6NL5OEYpfUUz/83GNn3TOD1kZWyrZDzG4UmsjOvTnAvU4PNUB3DUSJ3USq3UkeIRS/3UUM3KzczMvAguUH3VSK0RVo3VV+3H9gLIMyTVlmzLDmLIYpZC+aLIFa28ppzQxRvLA9zAIP3RF/2/JCzPMavMM+vR7uzJdu3F6izNu/zBdd3Xbf3KCg3X0yzXhT3VD02Zp1zFij3Y1PzP+PzLAt0Y3KytbO3LfRvA+SvL+3vOY03aLnvXoD3Pol3PZO20rg3Zbp3J47zJWOzQre3Mpw3Y5xvacT3Llk3X7/zXHpvaeU3Pe/3aVM3X6Izagc3bi+3bc23YvUzFRazJrVTbFh3c0k3dTTTb1//dzo592wiN2G+9wL092r8t3ZitzZpN0JwdwtEd3kMczuW9zqpM2Not3+m93Lodzc5N2Yyd36W938/8vjhB0iZ9FjO908Ya1DpFNjMN4Tk94RRe4f7D4MXq4A3e0zzN4RvePrbdQlzN1WKt3AP+rls94kut1SoO1SEOGCQZMPDd2CdOWmYdEYjMZtkdYuBd4wKe28Pd3D7O4xAxARFw5EgeAR9Q0Je948dsEUae5EeOAagM0BCBAQqQ5Vqu5Z5S4sj92OP9exOw5Vt+AVWOzxBxAWS+5R3w3t7c2TvEAGu+5RMg2WjuEGM+51pO5V/75jPeXsXVAXqu5XYO3ETeEIP/ruVm3ucG7eQBHQBYnugKEAHV/eeHHgAUIOlZvuRpa+leDtsLpuaa7gHdvcIN4QGaTuid7udTHDwfkOoK0OZ16enEkelkHgIbEAJrXue/y+oAzTqiruW4rutrXhEv3t4BgOpkvgECsAFrzuhNDufBk+dazuzOTuayLsqd7RC2ruWwQua83uuNLu3TE+wK8O1kTuqzbuoNIedbju5ZvuirPu60fjyRnuXwnuUUUOns3hDUfu4JseV8Pu/RXu/aJOj4HvBbXujSnebervCx7ub03urF5e4ALwB0zvD6jewNce/w3uWe3dnrzRB5Du/yXupWXuQUQAGwsvKc3q4T/+sK/xUBLJ8QK08BA4/yd54URPTpyf3l0rlvsALLO/81PR/ytD7ySeNMx05aIqEBAqAB/azejk5YUC/1IYHhSXfSXbHgQu25RP3TwyrhNG3hZn/2aH8sWu/hGp7hbf/2bA/3IL4vLb7UTV0Sdb/iSO/rmpjieS9C7fn3Se3VBwPWUeXzYJ74pHTjUZfjB6PW+O3XVH+/PEzB/IzCho74QR/bZ27fDB35mm/aEA3NEj3Z7LzWSf/ZQl7cq33cuA30og/qEW3Dpn/flR3fQ37Ys0/0nk/RoL/3Mk/EOt/7cYv6fC/c8UzcwczRP2zizg/7rrz7iW3ez43euP/8ry/7pE/71P8P4NBN49j/89pv4NxP/Nhr/DEf0o/M3k7v3jBf8BRP37I90cX/+03/46NP/rwv2Kdv/46u9AARQOBAggUDAECYUOFChggFCDAYUWIAAg8nXhz4sOHGjRo5fkzoEeRHkSM7CiCAEePDlConmiQJ0aXEijJnGiwJc2FOnSEF9GzIEyjLmxGJFi0IlKFFpARrNiUotKdUnVRhWjV5FGoArVCV7rQJ9elWrj+/+jyLNi0ArCO7Nn2L1EBaAw8bkK15F2qDh3PP1hXg9ytgwUoJ0417ky9KsosfPoYcWfJkypUtX8acWfNmzp09fwYdWvRo0qVNn0admnJhoIf/9n0dGLH/7N
jnklaxGd/wjnOhnxEHGXm2H10CZE8BBqPkX80RE7J0M10RA0TQQQEacXuSWG8HrPC6INn/g4Lr5OcinzoRKBmJ8sDmqLyssdGwWnvfZgcYxw1NnysEfHyoWxjXjEoZmieDI0xi6JVvRiUdzoGjLmkJBommIMESmlK8qGkZxx5CDNaERDwq2KlVSk3ObkxDyWbo9/7CPr7HjJQGYOkniS5J9AqTZR2g2TiTFl8FDJyuP5sZeAXKEgowbLQ8nSbCckyzCzZkkxCvOVnJSiJ3sIvd/xEUO4lIsmiRnNQtryc3XkUiufebs4Um+OjAunxMbpqGU2RIcLgeEsKVlLdFIpm53RpZiEB/9MVTqvn9h0ZTnH0j17ZctG6SPo+iLjPuMkNCvUUpVEVbVQ0uzvohjNqEY3ytGOenRY/bOgSEXqAGqN9KQUTBe6tLVABrYrgw1UqQFbisCXwhQBoEmADR2AEJ22s0EAW4rATAjUEYZkqC2UjU8/lK2lDjGZeAEjQGunzh4p1YZNFSYt8fZNFEpVQiVzKnyy+lN6ctWeu6uqkq6qI7KSCaoDoVszs/dLsOZHrOBx61O3Cre5+jN642ErU3uqVbP2tavK/CqMBPsgvZYSrrip6zp9uUq7Igiv0HGsFiGbHsVyjLL/tCyGMPsbzb6Rs3JFbFTVerS7YpWwZVVjItFqPdb/2omxY4XtW/n6M9XG1Z3IhKZr26rbvRq2t7RFm2fXGtbXCoC0mQTh1WybxmoWb3uXdS50c4la4K7Fr+mUrFWbS9znFla2oUzuF6kLRfIO1ryxte4Pr0lV8TJ3uO/drjalO8rlzuy4QRNudsur33x217+TAy1gRUulAsfFtI3kr90QXF0FWxO7o9XueeW7Rvqm1b6tVehzJ0riUjW1xCSW6QM5xVCjhm+lDfWUi5Py0Rrb+MY4zrGOdyxhglALpgQIILNuquLxYZDILAWyA42srQiiFKVHziBOZTPji3SwyhDpMQqDihSk9kvLylRhO7k8L3gGQJ7I5G2AfdsYfFJF/59y4udCGFCBCnxAAB+oM6jAvFoQJziY7RTwKeXYEAYs4NATEMAEDr0BKgK4c+C9p0CBt09eBoADh16AqzKNgUmit54elpObXwNntPEzApnWNEJSfRM+/5bCh7wjOSkdZ15CINWbznQH5vnps4aaiZNOnh4JHYAQpFrVAjh2BdL8aGayOT2jpkmpQyg8GRw715mGALN7fVj1LjLYGN4loTFwbGSXOwIhazbZni2oCHtXb8U0mGOMfe1VKzvd3Ebur0MY7fi828y1uXWqE73oVO8a3xye7b6/PWthD/qcC0F1qkkwARIcGySujmxlJwvotwpa3BBfCKbLnepObzvh6f9d+Cj7HZNtYiXeUzOPoUnOaE+jHNSpDGjDw11pYi+E3DRHN69v7uucS3rnge1mJCMm8GNzwNH5XrO3Vw7upJuzi1OfMHs/WUOkM7jnIZ/qhzf+2Y4/9eNgx7rKte7nCpv9sWivtc/FaeHril3UVf+6Tgqqr4NG5qFOIV+L/w5jFKeYxRblseIXz/jGO/7xruofkIXsPykXWX1JRvIBXXp5EW++plF26Qb1dWWi4gvLhWbhl7tLZkqpPmAAPybC6z5fo489tItVeiynicSoQ5rd7552UeTM8b9e+O7K3fojdW9M3kOd6N1eOzjb3t6rnxH4sOa6751Na1NbuvjhJbv/k7a/7u5T+/tlN77d6U5X8Yf44Wq3fW2pr33o65vncg87+9Vfe+Svl/7LZ32dhH3K13v2J3UOB3Lx53/TBYAGSHsdhn/eN3fgV1/u92f8F4FWN2z6V4G3t2AeOH8XWCgw5zayVzRq9ntZN30jWH0cuID7F364l34yCILjJYDSRIAO+HwQqHASeH4USIMWOIPjd4AqmIBpd30r6FUFyIPtR4Tvp4BKKH1MuIM214Mp94PDh35FiIU4x4D91YQvx2Te43foA2Oo9ycxFipoOHiJB3lwGIdyOId0SCwGBGNK1nkQFXqgl3mWpy0rZQBO9mQjxYcDNGUl0yBpOGcZlx6t/4cTXhYwjdgSgfSIJ8EAoNEgOtiC9edMgTZQGKKJS5hYYjh0nuhxoEglokiFpGiFpvh2mxV3aLOK8pd8rnhyp3h2qSgmtAiGbMeJ1JSLcLeLctKLMTiENtiFsHhashhCxhiCtgiMD7iM7taMRfGMQviBx3eMYsJyEGKNo4SNypiBPuiLbuGNOyJ84egemwiFGKhzn2h+18iOo9hn0uiECIKONwSOdiOO/0V+wQV8+uhy7yQb/viOyLiN0MhveZd7+XGQbkeOWWiOKDSQ6tiP9MiK9uiOEQmPqCiP62hmkxh8JNkoH4mExZiRtfh/93iF1Bhd/2aQKkmRrdiSr5iPDf95gwgCkbFmhNynkb9lkTGZiDPJjSLIkT0pjLFIjCvRLSuFUG1IGmtoIYBHFBFleBJVUe9Th1zZlV75lXAoeTBFeXkIiDNVloJIiCelh4FniP9jU1I2ev1SevoykpZIQikUSEVlQnd5VLHnfC4pkWAilCn4k1o4SsSXjUdpHIT5NxEQASngASnwmBGwZ0OJg95Uj69mI43pOxJHcJn2dIa5gfA3hSvZgFKpl06zAYeGbQsgdAEJknaTmONYg56imqgDdOaWaY1Wfig5gR2omNGIGZ3ZPDPXmvbGab55mLPJhf/ok2qIm80zcsiZbBcXm78JhMFZmwlpIMVJWRKXaa7/eWgHVzT86Ba0+ZxeWDXf2X5NV50kt2zmyZTaCYMLiZrEKZ2UpZvweWy9OZ+yiZ7OiZDa6J36+YSsmWmgeWwhgJ3MKaBByJ0FaiHt6U/0dmgUZ3Gppm0OSppSOICaqXH5uWEK9p7xuZwemoQgCpQiypgH+lfhSXLl+V30uYURqp5P6KIkmqMB0AE0twCdcpEQup04Kpjs+aK2yQAQsKRMuqQmh6J6B5z2KZwsOaLxtZ7R96AolJ4Eupi3uaNGmqUpmn9TKqFeaqBgapvr55AvaJo0uZE6eqU8uqY6uYeAyRDfc6fIVJV7p5WMiZVZaZYrNpUpQqhqApaImqiKuqgX/3WHjspkfNqUbGmVk9qnggqpgagpmegenNqpm/qpMgmq6LGTnkqUovqQpWqqoTqqpHqqoZiqqOqqryqrrOqMsDqrq6qquhqruVqrKUmrv9qrrSqsuLqrvmqrwDqLt1qsvGqszHqs85isyEqsqris1Sqt0PpbDWKtvMitweqs15qt0wqu39qs5jqs5Fqu4uoW24qt0Uqt3equIbmu85qu43quz5qv4Wqv6zqS3qqs8oqRAcuu9Pqu/Fqv+LqvCRuv8Fqw/jqwKNSuEKtMEluwBDux2vqv94quC6uuFtsYFduxANuwI3uwCPuxIKuxJ6uvDGuyAkuy/dpdKruyCsuxNv/Lsg4rsxibsTubsj2bHih7sTBLsy0rshuLsyUbtEA7s0Lrsk1rtAYLtdDqlI+KeZGqElS7YlfLEVmLqVWLeYwatmI7tmTbK2KZQWSpeZn6tZ4XiDR1iG75LoiYH4s4EnS5eiE
rtS9bMn1pEZEoVH8JfHl7s93Zf1Fan25qlElLuEUbpvc3plKauPdJtB5bs2pquGxamit6mpS7uEhbpckYhSqagyG6tD87ktkXgG26uW/Ks0PbuaCrkHX6oaTLoqb7uns7p5g7u6Obmbb7sKdLkue5pQPakZZ7vI17uRp4uDZKpF3quchbuRO6vJlLu77LubnLuNJ7prt7X6tbu9j/+7TaC73TW47Mi5jFm5R6K76fi5+he1vMJ2+Cy7TsW7jUy7tkKrlUGrXje7T2a77V27tLN7/BW5hQGsD5y7qK67/JG73D+b4kGL8xp6ezt74RS79VaJOQUoJ8c4LAi7v1W74Teb7NeaPPy8DbS77ce7/ea6dl6MGXarVrq7VRSZx1WzRlm8M6vMNie7YMlLZ/uLUb0bUyzLZ2+nlw64eiR2UcVC91qbMFfHp8WbeYyMGGA8OD276w676y28LWO8Cl+8FOm8FIWUaYCca/C8UgTMYQ7IKaC76t67NrTLHq1qEkPKRlWqRanL17zIJlrLpvfL1xfLtjTMcASaMBSrwm/2y8DdzIKRy7dOrFArx7BDzHeGHAdozAkavAk8vHDozCD9zFopvAcLzA/NvHF3zIAJqdzZvHJ3zKnwzLIvyFd6zIzsvIjyzLuRyGt8g2Vrw5WIzBhmzJm9nL8CbBJkjBKCjMl8zMxazBvozMHazMYmzBw4ylj1vLysSluKzC3gzKkNy9o7zJpdzJIbzLnjzLRQe5iMvJ+7vF8JzOKwzA+EvOgmzK8XzO38zFkTzO7VzO7yzP+5zPbCzK8It5L0zNMdy2RtyWC22nhto+N8zDFF3RFj2HPoxAQMx5D+3QQqwRRMzQWvu2bxm36zK3CELFdyuJalzIzfwvVPx6gPvLL/8UzFEcvKm7SWdMyWHc0tYMpwYdwTvdfJXs0q6LzQiopducvp3400ft1C0a1G78xTydxlkcy7wMzcc81PLb01ftyKHczwf9vfdszqlMzHKM1Eeo1HjBzerbv7o80L/4xzpN1mgcvmdt1Gmtuyzsz62sv2Yq1/oMzvwszmMdyHc9yNUM11lN19zE1RNc1FBNyHxNz5JMymUd0IMd14Td2G1sxnZd1Xh9zZONuqW4wdJ8xQr91ei82Z4t1aCN2KKt2D7N2HP92YBM1UTt1c68146b1Oz81+4c2J0t0MXtx7hd17K921bd25T922sd3EOMEA2gJdUNFg1QqZLa0ZTK3Zb/+tEGsSlrk93eRd4Xfd7ond411tDdDd4+pt1YC99cK9/T7d3b7d7vbd9Y+y8H0N/+/d8ADuCZ4hEBXuAGvtioTNo76S0G3uD+nREM7uANLpcBs9JCheBYbdxF0bcV8bddFri8fdPQmcn1/M+ZTdycneIEXZOOPYaQncySbduufduwnduTzNyj/dJo/dzKa9l+jb6L/NYJruN63VmnHc0vPs0xPuRPDd2jqc1tzdTBWNq13eMjrMkmntj4rOErTuSVfeUlLtwAjeJd3uRMHtViLdShjeO0zdqCXdBpPtU33tXNLeJq/eRYLuYnrscZPuN+DueGrebLTec5buZ9ntdf/07LeQ7ktyzkh67gZ27kxgzidb7jpj3pNL1Dq+3cGK7OYgrlv+XWTS3jiE7qLJ7cj73mhN7mnH5gR77Vqh7ZIW7pro7pTHbdGoPrV0He+D0QIX3EM8xk4w0y5q3exn7syE4/7P3dy37fwV7ENNzs+y0bEu7gA14S1d7gnd7apR6KEZ7tAA7h4F7gFC5UFt5l2/7mkE4lHH4RMv3hmR5PNk3rbh7OfX3Yur3qW/7n6w7W9u7j+D7nsl7pRX7pWk3phe7bkW7wLV6QqQ3Mm27nVJ7TqT7oA5/wPP7oXu7kBxzmjO7K3Xzc3a7xz9zwqJ3kqr3kJK/w/wvml23PWm7W/f/O7TMf1oEu55gd85o98v6u4jQe57Gd7xfP6hLP8ST+8lk+2/vO8zS/8Vau6B5fwo0+6gtf5S0P9Uiv5zpP5lze9fwO1EBv4zmv9DLv9Cuf8Z6ezYsu9SDv6D1f5iyf9sAN6o0h6lNu6mb/9j9/80Ev8DA+6wVf6wcf72c274Ff74V974Lu0Lp+Ro2PFLz+7CLttSs27OWd7Jif+ZofLNI+3/pd370uEL/u0ZIP7J0/xPw97v997aof4Oku8jXvjN/e+uLe+v5d7l127vPy+j7/9b/V7lkmQvaC8ERP73WMyKz88YDN53rv+3E/zy7/42y//K8M92jf9CWP6i4e63//T/BUjsnIz9ahLuXTiPeG3vyArvg4D/Nkv/Ox3/tMb/Pq3/djz+ZL//7Wz/Da7/AoD/Eqj/4AAUCgQAECBh5EmBBhQQIBHD6EGFFiAIYTLU5UmDFhwYsdHxLg6LFjQY0lD5I0aRJlSo0rWSqsKPJiTJkYX24UUNMiyJw6Jbq8edJgUJxEixoV2tBnRJpLHSIl2NOpQ55THwI1ipWo1qBcbzZ1CnYpVAAhrVa1SnEoWa8v27J8m1Ksz7k62UqditZqXJVrofItCbilAKV7CacNcBex3qmCMzqG6RcpZJyFGx9Oqzgt47CSs3reCrqr6K+YDVt2qvms2ctko7ouS9qt/2y4pi+jHvsXr1POSykvpC03eF/YdWsal6k6L+vOxYcHfj7YOW66tlPrXszcd/TH3CM7n44YuUioBgo22HzeaoOCBsiaF+C+fPv39Odb98kev079Bf3/BzBAAQcksEADD0QwQQUXZLBBBx+EMEIJJ6SwQgsvxPC/++RDCj4OifKwvvhE/BBE+zo80agQy3OgRRdfhPHF11wr6AAbb8QxRx0P+E0o8Gj0rigDdiRSR5KGLDLJAwZgskknn3zSgR4HKogBqxiYcsa7frzrACunwlIAL6+EsswyEcgyNvGCBI5L3cJLazyPnszgAjvvvCADKNFkU6g13ZysTyoFjYo6nf/k7MhJEBRgtNFGQXiST+f+BBLQz+A8DTEnT3C00xMiTVO76iwNjdTRDD1uP52azKBTV/VsUlIgKd2yUlvvQlUmRC9iMoESXO20hARiDXW37UwtDdnZchVpV4uYtABYVy0gllA147S2WGVrY9YjZycaIAFpgR12AFmVa+7WN9UNtNuRVK1pgGg7HUGDEaZl8lzssN1WuH77cncmeGUK11UNBNDA1XL1DZTWdWuFWLeALfpWIiZZ6NS/TmE1t1iHA/0XOkxv0zTcXxvVuNESnGT4s48vZRdmXNecGFwmF0W5IEchrXZSfmMuFehTaS6ZSU4ZTVmBC0DN1thRhU4W6mX/iU7LSXGR1pnRcnue9eeIQZaaW6qtotMCC/wzm2Ou0T02bH/dBnjsqcz0z8yOm3456K9lllhup8z0QAAP7G45tLxHC1k6IGv+aWCRzETBAxQI99jrhy8He2ZsGY8oAc8/Bz300GFI0UQB0JtKvxKDWhHFETcUkfOH9JPdof4yxD133XfnvXfffwc+eOENhL14FUtnHfmbWj/+dddXX175l6CfTckk3avR+iK1hVvkSpHU3kiDwA8fx7u+dIoBHgVAf6kwE+8O/sjGBLPG9n
1igG2fent6b70x/0ztHFKxiOhPJ/w7VNPkdxTNZSoz+1qN0xK4wDZ1bzACVAsGDVgT/wSmioI+smB3MEhAiGxQJh3UlQJD+J3F+S03DUuPBD24QgYCMDQjdJxHTCgSFDZLhf5DXAs3h5gdeqSH3vqhDYPYQJI9EIYRPFzUgCjFvg3RiS6LYRRn88FBjawzGoTgcmSYQi5qSYmlwSEYnyhGLdamjLHx4nbUiEUoWi5zZ9yiEB1olSJ25IjveiP3mPhFIoaRN6KaIA0rOMWpWZGPhlzKHwUWyCS2y4U+6eNFJEmxSvLtjpZ05FQyuRNEzpCRblRkUi5pF0jur5RkTGUXY1moVdbkPqgTIy6XojoS9dJ4zRMg7dJyu+EV05jHRGYylblMZkbol6ajXkqYB01fPv+vms2LpjRP+TY8onKbxJklHGHTTW5+0pPn/J850zlOKqqznehcIjnB+U121tCdeaSn4vIZv3Duk4Xy9J4/7QnPd9aznPf0JkD1qVB+CrSegkRoQiN6UILi06AWnWhAGfrPjQ50nRfVaEYXKtKRVlSiJgUpCB0qy5WytKOLfCk7IYrSecbUpSRtaEprilOPxpOnPS2oT3Wa05+q1KZmLKpRkzrOmX5UqE4NalQxOlSOLlWcLUUqTUOq1ZQ29alflepJoTpVqsLUql4FK1nTKtay3pSrVX0rXMfK1rBSFa1qretO4wpUvLb1rnTtK2AFS9G52rWTayUsYvVa2MW2FQD/00ye86j5zMhm0ySQjZ5kJ+u6ZnbWs58FbWhFO9oeYXZ60pMmalMrgAK01rWvhW1sW1sQ2da2tgVBwDU3qyKsXtWqsbFtcF9LW+EKlyHkK5+N/lrW5Ta2NMU1Lmuha9uCvOB+OnlfP4/q270CZ7rUle53Y1uQB7QxsY5t7lZDI17ZEpe9wxVAeSViAgrU174UkEDj3tjbLBXrvbB1738LQF6JiGBc+tXub/vbNAG7NsD/JTBEGjAuBbSAKYd1LHe769b1NnjA4RVwhB9iAgorQJfp1SmKifoZD3+4xSIOgApKrAAKQETFIL0xX2vzYhBDOL4QicGMFZDfAWIYvUYe1WxfeMxj+QbAwEJewVWQzNwpO3c2S/YwgT8wIHsBSwVF3u92F+wcLDeYwAVKWKdMIOUwK5hQ/s1yj9+rZS7fq1O4zLFB86xUFseZyRGZsQhsXOWh7pnDoylziH8MkScfeNBt3vChAyucRPu4yQ9ZwbiIzOYERzqrioVOpee8aIhIQFo1vjCkGavevIbaz3G+9EMkMGtaz1qXYO70qksK6sGImr0wTldv+ftmBr/azKRuTa55veJlR8bX4gV224RtTcqeVrORffZ3C+KCYar2st4uiQECAgAh+QQACgAAACx/ABsA3gDOAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2efI2dsAenr+AAD9iwBKQIlDNn7R5eUKgHmysb8rgWnJiRp6K2QqFwCUNDSNhjjBEy1XU1OxhyZRg1UMVU4sAAAwZmZqSUkCGBZshEiSI1GqAACrXQDVFBS3FzW5IiIwKFxmMnPbCxo/g1+mHUOqqba+vMW70tLTDiEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wABCBxIsKBBgQIEMAjAsKHDhw8ZHEi4EKJFiAwSHty4MeGBihdDBsgo4KNIkSQ5qiSY0MGAlzBjypyZwCPIkxhX6kyIUyQBnj0vatSpMiGBoEIFHEUKcSjRjgIQzJxKtaZSphCfcgSKteFPAV0dOtVa0GjYhmbPBhhLdmBCqVTjwrS69Gxbg1zDflW7VsDdslfVpj3L9u5buYjp8v3LEqzavYL9MkYYmHDlsIXbHkYcV7HayZT5QrYMGsBgzJe7Zia7mfNUz3ZB5+06GnXp06pTY12ttbVrmbDD3nZ8tnbu4XVRJz8uO+rv17ixDhc9e7fkydGZZkfK+6nv5y+Dd/+d/ri69uuMtwdV37M70e/gxUtvTp247ebLrec/Pxwu+Lns4UReceZxh95fAZ6UoEju7eTcfwDqxtSAehW43oGGSWjgfgb2B2GEHAZFIW0WtoehZhpeGKKJHn44gHwT0leefcxhl6KJK+LU4ErwPQcjUiNiZZx1yPG1YEg7FvXghz+KKCOBNBKJn5E36ngia0tC2GRPQTI1JH9TRpajglf2luV/Wwr4ZIVRgmnjmAxWSWaLTB55UZdIfdlhmJbBiWSZ3p0Zn50W4RmUnhcWKSaVdGpJKEQGTGZAiScxgAClZEbK2KRR3WQdAp6ep+lfnCKQwKmopqrqqlaBGllCsMb/KuustNZq66245qrrrrz26uuvwAYr7LDEFmvsscgmq2yto95VKgHQRivttNMe8Cy12FJ7qQDNtsWpAQdkK26024I77rjlSpqQuedmW2647WKLwHChBiURRWel1JxJYZHEb1f62vgvVv7W2xMDhvaEKIt8Ksdow7n5eeeaJLa555tUSmxRklvJGafGWVEsJKZxKtrnwxgv+pnIXpL8p8kORwZzxIuxnKfLSc2sH8rpefwnyA8ljNPCVup8HtBiAfqez0kh3ZDQJxE9J8Q7y0z10TXbWB/PCDK9sddNKe2g0ww9GrTNh+L8tdEbcp0h2X3BDbVPaofNtopuowi32Q7N/x2S1CVf3bbVKZ+8stYz5o3l3mA/xDFUjMuNtsJ1Oy42j40nzfjdOGbd89aE9xy54maOfvjniYfetemkCY736V2D3nrhMc8uesaex5667avjrvrbvseGOJSkB8r6fbTTDHuGsiN/u8q8Aw+9cJMPXXnSnFu5ueudq9UtWZyqZen1aHGrbqeRuWrZ91qVarCJ6qO27Pz012///fjnr//+/PffK/tPuVa8qiXAAZJrXedjlwEPyC14LRBa6drUuhyAgApa8IIYzOC6HPjAeTXnfZWaiELy9biD2ISEJQEhSkqIFwG4xEVWGRhTEFa9qJGvbJdT0vFq1DP/OCpzT6sh3f8slijuaa94S/MhmvgWxOGxCYljC57zuqbEQQGRIX67COBeZkQybc9GVfQRE7EoxL/dsC/Z8yIUMRfG36TpJFm0yBZz1sWPrVFJbXTNG0USR4jMcW11/NkdO5ZHzuwxJH18yB/tFsimDRIqhUzMGAOQSIcs0nJptOPvUBRJuRxyYk6s2CNNeMW4jbKFnezMJCvplTOyEDA7lBIYXfSiVZZRi67MYcdi6aYe0vKThbqlHHOZSUFuEkuprIotQzkyIjIseVWLHid/uUzUEe+YpZMiD6lIzVKykiGXxF4jv/ZFX8Kwmrq7pjQXp01ZmrNO3hSmH4k5zrCVk5vnjCczW+b/zKLV03L3zFAyoaNPaz4Rm8ZrZy/xCU+4AZAo4cvXtqj0UJ24L30qjFNFV1IqVnmUVW/J6J/8R9KSmvSkKE2pSlearI2qpFQZjGlMHQBTmdrUggiUYAMfyEAFdjCnpNogTyEo1KF60EYJoKUDEpJUeA5HhkgpGAqfKlKLSLVfw2nqh7al1SXqEnIKvdjzD
Ce8nnX1P1ydyQPWuoEPbGCtDwDOKxvDS7H2bnrjac5ZwZPWmDxgAYClgAAoANgMyPWrpKxrEaGJteWhaK/P6StMLADYBcCqshqIiVWK6UiELk2xXNIrLSU7gAhU1rIJqawE4hqeubqllJN0LWVAqyak/46WqTHJwGkvW1kQRIiz5DwlLMPqJNu6SLIqOC1qBaBc1m72n5oTLl2JG1rjbhW3A3iABJTL28oatpaIbSFtp8bYwZW1a5D9TV9BoNzltjcC4AWuPaX72vHyUbTHxa4G2tvdysL3ueV93Tqzidf5WBdCktXtaQVL2NNaoLXhHW6BFyo9slLvwGjFbmmVOwIKjKC5EJYvQOk7W+rW1qy3FUBXKdve0/o2xNAtX0D1ZmI44ve6KvZri1XL2vjGGIczZueEgXRjBGv4JezdMXx/+2NTejaKQy4uivObY79ut70P1qxsTQPbLos4uo7FUnpdQ1orflnGJOayfRFZ5AxX+f+HZwZymmNbUPSmeMySjPB0o/zMsdbuwlPGMZ49uWU6B5nAFu6KSznCqY86GlVcfbSjgeqskGI0MoveyEUtE7/csPTToA61qEdN6lLPKtMHKeACrfWWoRIggkHdqVGLylNYV5pbFLzpTWndQXrlS4RVxUihU4hCqNpr2C+Ep7EP9s0AhLN8cXbygAM1UJqgk3m7m2KF/7xNgXZTcvu8WT/J62flTTuJ384dttWpbRrz2Z+zzCe4DSrKJ2NuzXSMd0PVjaLmddvdiXYnQ38473Qe9NxQDjiFpylvfmPJ3wLftrnbjcx0h9lMEF+4kBVuV283/OKBynjHAc7tiDN83yD/X5rIF1vuaFLcTNU+bMHXffCXJ/Td5B64V2fe72z/e+Ml1zjMLX5emtcb4feuceD0TXCHY9znJgf6xH8+9I8XvefspvrNOc5ynVuR5w+HutC3HvSRV9zqgDb40W3+WaVzkek7d3rIxW52RJe96x5H+dXDnvWo233qfqc20cOCaoNEtF8TxfT5Ov2pYIet8AXpqKQ/amnLmPrymM+85jfPeWYt3tWsjoqrbe0tXi/wXaOndOllXWvTG/CoPXO8Q+41wn4Ne9k4uSrAbi/7huieYM1+Ng6jTWfi19nozZxzlw9NdsDHKNxpG/fSW95YpOvQ7aCkd/LtfX2cT/+uXK+u//b5qXx8AzLA3dv70/s+9rZ7/+3UN2/akU9+7u8S++ePv4Dnj/Waa9394Qdv+pd+/Md3/hd4AHh3fQZ+Cnhiard91nd/75dvA3hEKecgK7eAEudy/5dwDWhj0Ec50gd/DOh8ddd8HGhg4ydu5Yd/jIR+Fqh+c8d+J5iAJoh3JHeD4veA9ReBYDWB+VeCKfh8Kxh9LQiEL1iBanSBmJOBAiiE1cd2HqiDDkh/LGh/PxiAObeBUZhXIWg9I0iBUCh/HZh0SHg2X2hDYRiEXEiGCDiFQ4gUkEcQhwcwibc+i9d75cN4opKHl2Z5nReIgjiIhFiIyzKHA/Etnzdr5qNTPv91eq43QKQHPhOka7vGer3WHL5me3r2WrhXKSfEicORbD/0iSgxHM02bOaXhF4nRseHIqnYiSV2huIEdy8RARHQVhuAixHQYxAGdmYSi8bHfOgmE6a1AAxWWVkGInK3NMLYZMXXHEqkXYDVX0v2i83oIM8Ig0vog6SkRPtVjamlWlr2iliyjUqoSd6ISjpWWf21AJmFjUyoJOg4hvtXhngUEywmjswFYuAFjIFSj214j2/IRjBxjO44jg4mjzLojPKESdDoZdIIEwqWkP0IYsAUMjYikDkYhzh4dgOQZBbZXqv1j9mIORwpdR6pgSdHjZWVjMoVjxmJhj2Tkn+3kk//mHcDEI6A1WEfdlrfZZLz2DE2iYJdWJD5mFs75l/MOJRQUZQ2iJNbeHK3uJQLsIxC2ZDa+JC1mI7GtI6AUUX72F6+OJN9o4lcCW0RSYwOUkUPkAESEJdyKQHx2JRaiZJpOXxrqXwxV44A6ZBp+H0DSYD4SEiD54U1mZdotJdY+I2HqYJdA5VweJTt15aPyRSIKBCKqFMU5Yecpoc4lJkAIHmTB1LoA4iGmJqquZqsSYiiqWoGFHoIkHqNGGuP+HqVaIkypXqUiImQ6Juvt4kAA2xTtS+gSRKk6FVUVWzHGXz0xI1x0pdMBp1fWYDrd4CVaYac1k3DeJJK4oRTqZLn/yGdMEadnXWX30l3H3mT48mdjImeHQOegtmR7ZlP3emUpCSfJDiYVkKePmaewYWfLaSfYsifZOKfAOaV52mdM4idNTiZBoKgquiCNMmDVwiWe7ad9vmeDKpy6smS4hmh7gmg8yWggEGgbEifIrqhJDpiJtoYKMqKBhqdI6qgAQqfUBGjENmiSSOhsqhmFHqWgWlGayijKnohPnqfOJqfH5qTRwo/NWqPhAmZVmiEjSleVJKkHIqYFmqlGFpfWRqlM1qdXFqlIniEGupUW0ql/bd2hZmFqKGlPIpmL/paOtqVUtqfYvqkMUh45yM+d4gar1l5cVqaHsWb7UOoucGHBv/Smo76qJAaqSg1qLV5a5m5mbaZQJrqiJtKKqiIlom5kaAaqqQamaUxqqZaqhnyqaLaqqp6queIqrAoq7Pqqqn6qrAakLQajLvKq7a6qr2aq3j5q7FKrMWKq76KrMJKj8G6lcaarLcarcC6rMcqrdU6rcoKmNnqrNSqq8+qrdZ6rbW6rcPareCKreiaruMartCqrub6lM1aruzqreTKrN/6ro3BqvN6ruvqru3ar/hKSvrqr/S6r9xqsPIasAIbr/Zar0TJsA2LsPg6bBD7sPd6sATLrwqbrxW7sBebsAArriK7sZQxsCH7ryNbsBmLsST7WiabshqLsjKrsidLshT/+7ERu7IgC7Ms27Jc9rIzG7M0y7M7G7QKe7MOC684a7FJ67E+y7FL67QSy7RTq7RNa3idaqmcurWZyrVa67WrJ6liO7ZkW7bDQqmzOVSySZuU2rZZu3q3KYmRGC+w1zXHSZyi2LFSq7NKYooh8Xsz5JxFuqNXCxhAy55uSIRdeqZXarh620JK2qEY2KThabRFO7SIS5CKa6ZgqHyHi7lGmbhENqS4NLh4yrdUi7pwSoUgWISM+6Ul+7iOO6dyVqehQbnzSbQ5W7NRSZmj67qd27hQW7jDm6fdKLlNiLv7ybs9a7ndp4X3RbrDZLpqGbV7y7za+YHRC7xq6LmyW7xj/7qgZdqmECiFlyu06Auhmvu7ixu8sPuz3+uya7q55NuD5ru7upu6fHq842uAboqU+pu/Vmu86oi86UmD6wm66Xu+6jul9Ou/5fum1yvAE5y5Dsy+nNu9wiu/1gu584vB9Xuh9xvAzkvCobu+Uta+Gvy+SEu8YEqLDUGpgJpLbgu2vYm2oNkXjHohZtvDPvzDPYy2oAebclupcPu2N5xAHPSbcUu3wkkweLt7P9rCVUtKfnsRgBtVguu9HTy7NlqiSzqgylug2Iu/Jby6Urm9KjxEXOzCsUu70mbA8TnGKUrBHtzFGaq9bCa980S9eonHHOzGQArDZMTH
ivScgkzF4f97o3Kco3RspHbsxVWMpYRMSYopfIsJyG+cyMvnnXOMwCB6xhWswA3cp/17nf+bnWZMygycvayrxhnMxhu8yZMsyYsMxo3MpKDspJEMvmUsgdC7x9wryyw8xcYcuafcoKn8oK28ygv8vHqcfWtMpG1cy74coii8g7FMzbMMv5rszV/somF8oo9MuNYcyOc8i8EszdtcutWsuqPcu6Kbwu08ve/8ywN8y+Kcy2K8y5XLys7cvCd8wUEhwxJFw0icqEacxDq1w/CTw0Ac0RI90aopxGpLxHSLqAGk0RDF0Rbl0Rw1t+2Cekb1xDMUxQSjyPgczw5yxVYVigCzxd2s0r3//MLrHEyGbEmInM7gzNOG5smO7M+5K8p3zMmrWKH13Mf3XNO0rM9gNs4wWs6nu9JF7dOdbLtcdqfVy8mfK8/ZXIUh7KUjnM9Ubcv7W8DJ7KFCvbxM3dPwTMk3rZHDzM3F3NUC7dUErc1h/bpjzdLNbMJ4bcpsCsH2K8FV/dZmjc15DdaELcKGndhEDdkDLdgPjMoRDMBk3dYTWskyXdfx29RnTaaDbdmFjdl+HdB/jca+S8977b59fdhlfc0WTNkg3Nhi/diyDdCAXcr8O9rKfNmqvNupfdrAHM04PdfuPNPH/NmDHNcPQakNMCPRHRYNANIvZd2Mht2apt2pNkbV/31F303R4j3e5D1qNdy1X4ve6b3eR2zD7fNUBxDf8j3f9E3fkeIR9Z3f+k3TkZ3bzggu+h3g8q0RAC7gAW7SUYXSM8Tfup3ZZuLSwkZsWHXJO43Y/n3XvI3Wvq3WDprAzyzcqA3NryzM05zcnv3Nm+3cQorc9qzcdj3ccG3ccl3iLX7iXP3BrG3bfI3b6GzhPR7a4rvhk7vWZKzZyw3H19baK/zakv3hDj7ZvV3Zv13awf3kTk7cqj3Peq3jrs3joG3kyCzkyUvkddzfP27kR73iNK7ULs7cKS7jSK3kxMzkFw7jTZ7hoi3lHL7MHo7hdl7nIp7GJJ7Uh+zHmXzjKP9+1VBtp1K91Vb94oG+2ltO2o5t2rAN5jg+6VNe6VWO5Z5+6bMd5bVN6bdt6Xf+52ce6hqOmecz3Wzi6ljx3efN3gwda2Tz3bde3rq+67zuP7Pe3rSu0L9e68G+0Qv93vti4AF+3yWh7AHO4FcO6gFZ4M5O3wRe7fmN4Pai4FEF7X4e4h0D4REB08BH4Ybu7aj+5aqe56O+6aXe6dIe7ace6VrO2KS+46YO6CC+71n+1a275oW+1Gau7lC+6u2+58DNzOBu5d9e3CPOznJO13Se6g3+6RKm4k3E4mxu44+e6fbu7vgO7/PO7xafxw9/3ACv0+d+5Igezk/Nz+RM5pD/PPBuHds2DedqTugqL/AVH+94HuR6PuQdHso9P/IM78qCDvFcvuReXvOYjuTmeO9dnu8UL+/63u+L/e8630orD+kk7/NIL+kfj/BUrvBfb/Qlf/MnP+NbD04VbvMEb/Vqn/Qo3/bO9vZPb9RBmvEpz/U8L/dx//OMrGitLt1nIesJbezDrtC5Dt69/viQH/nHsviK796Vb/kdfeyXr97eAt/YPt/M/vn1je4Ln/YlS+2if+2iL9/afjDcfmws3/ECc5zkHrjm/vcNf/T07u+wHPEmPvGBn+5Ov+5Af/BCz+dED/jDX/QmT/ds7/s1DvzLr/xvvvZxvvRz3vSkf/bN/y/2Wg/9Gy/926/7Du/81y/1TE/1wV/6YL/7Wd/72C/x2h/7Pq7OOM/3do/J42/60y/4uJzWAAFA4ECCAgUICJBQ4UKGDQMQOOhQosODBS1eLFgR48aMAjh+NOgRJMeDBCaeVFgSJcqRJBGunAjxJUyKIlte1HjTYk6dBHn2DGmSZk2hQxkC7Wi0oUylDH8Cfdozqs6pN1U2TSmgKFakPmdiZYo1YdWWZEeaBYn241WxbLl2DSk2Ydi2NruqdQk3pF4Abpv6Vco3oly6WPFuPIwxMU67SAEbfTxU8Nemhf82hopZqmaqnK1qlRsgMs3JoS0rXbzTc9nVZ1unBS13NP/M0oQH152cW2/qjlsv+w68m7LS05Bfrz2eV/hk4MabDzUA18DBBrYFVMfa4GD0rtMFcEfqHTxQ8dJno9QeO/tB9u3dv4cfX/58+vXt38efX/9+/v39/wcwQAEHJLBAA+cbr6fyutuOwe/Me9DBBHVaMLwGJZRON7gOOqBDDz8EMcQDePNKw7uSQ8wjA0RkMcSKVmwxxgNMdEwABsRigMSBdNxrOeEOuBGrHAUAEkcaMwuNx75QVIxJxphLUr23NnRStSiP3AzLzp6j6TyWfKTySjBP1PIzLmHy8qQyWROTyjVdg1I2KZt6E7Y2yRyzxjjrOnOlOpG7U888M9vTsDn/g3NTuEAJHTRL4fpEKc2J/nRpUUcTxfRESE+SVCJKU7S0s0+bLPS30EZlLNTPUFWtVNQONYrVpOSUtcRGt4xy00lv/UxV1mrdscrecj2VVzZpNRbOZGHTVaJOHQI2LmQzFZTazJolqlhrN/NV2W1FfZRYuaJdslvYyFXy2YbUPWpZQKfF89tVw5UT24bQHc44fPG11ylYJXO3UnirjbfgGvtdiN2FJrypQvKoEys9hltyWMELLYwQY4QTSm/jANI7MGSRRya5ZJNPRjlllVfOb+KRKqbwYoszfpjmmV0GCeaGZb5ZOgd+BjpooYcOScYYo+PQ6BbTFdZWeX+FUWkX/1WUWsQEBsA6a6235tqBg4JsioERbcSR6X1/BFupIYsU8mqu334bgdvAmvuys592zWPR/qXJba09qCBwwSvwgGu58x2quKHMDjhFV50LzW+sO1Cgcsst72Drw02r+9W7Db5WXLEkH8CEy083QfPOjVK8y6aD/fxg0bGSHIPTb8dA682tMxe52EOvN/KsEyDh9tNJ8Ht3sVpH8/Ue8T6XXj6Fx9oD428vHGvl6Ubc9d8v1XT2ptxO4Hrj3d6+stUXd37J73ENXi63rT9dBA5EwF779WlifiXGofed9AylN/LdjgMC4MDx0Lc/mPQvUu37H+g2ozeFKcRvtrsce06XO//9da+BDPQfBEXIL/EpRXLFs5wGLZe6rKWPOCB84PvmRSUK8g0mkqNcCg9yucy1EIYncSCnRtg4UgnQVPLbWgV0KADLVUB1HlxJECcSQYJVEXjTQ6LWylc5FSqAdC5k3Q+nOEQAKoeGJTQK6QbQAQxggD1t7KHuxCgRKTqLjBIE1xnjNzq4DYA9fQRj4uZYExn+yoivImAfPyCADwBykEt5pL8K6a3w7ZF2fdzABzbgSCiipI6EJOKTQqmaGhIwAadEZSpVuUoXQGx5rmyKxCCEs4/ojGI8i5kN0VPBj7HMl78EZjCFOUxiFvNAtOSILV+Gy1vaLJfI3Igyc8bMZkL/EyPSrOUkA1hGx42yI+TSphnxOMNxGpKbfCniOVvlTaeVk5JWRGce3bnNedITnvK8Zzzfyah6ijOf5uxnN/XJz4CKUp3rPOg32alPKoLvn/t0KEEfOtB0FhShFr3oRO0pUYp
CFJ8RBelHQ0pOjXa0nSWtKEpTytGRAtSk0Uso7Bb6PIwqNKYMvSNLRbpTkuq0py8VaE1lelOaqtSgQsVptBraUo/+lKdAtSlS3TfTqRK1qlJF51KdulWX+rSrTIVqUb0KU6xqlatNDetQpWrWrz4VrW9NK1vJatSMjnWudg2rXDcK1r269a58hape/YnXwQI2qHQtiDUvgs1kUnOa/87cGWSrOUvKYshBxsRsZjW7Wc521rPvUaxFGBtNx9aytKYVQAFUu1rWtta1qj3Ia2Ur24MgoLIYC21iw3nYzMzWt6yN7W9/W5KoVc1Dgg1sTg3bJOEON7XNne1BWpA2owwJnFTlEdOgG93nbte1B4FA7wqb1pMSdqWb8e5rg5te4AogvA0pwQXkO98LTGBdyiVveZdbV/Syt73+be97FxIC8933ulbNrggBvNr1Ahi8DGmA+RSwAklSdbfn7cyCYdtdB7uXISWQsAKwMxb85les+41qjTRcgAb798EKQUGIFXCBhJXYxMjF8GdW3GL2vjghL5CxAuxL4gOX1Xna1UYwj9PrYwIHOQVZKTJi9XtW2OyYwy728Anicz/joYDIFsbukRWc5Cv32MPzSeDpSgBlMCNYzJOxspXDq2X4cPl0I8YxRgICACH5BAAKAAAALH8AGwDFANIBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5/0AAAF6esjZ20pAiUM2ftHl5StoaLKxv8sYGJQ0NCsAAFFVVXorZLQkJGxISMETLaoAAAIWFgRSUpIjUbcXNR1GRjAoXGYyc9sLGj9fX38/P6YdQ6qptr68xbvS0tMOIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAEIHEiwoEGBAgQsCMCwocOHDxccSLgQokWICxIe3Lgx4YGKF0MGyCjgo0iRJDmqJJiwwYCXMGPKnJlgpc2EJ08SwJkzpEabKhMS6OlTwFCiFn8C7ShAwcynUGsuZYrU4k4BVSEqnVpQaNaHXr823Mp1YEKnUNPClFqWJVaxDK/CZUi2bVi4d8XWLXtWrV+2bRG+hSt37l6ueb8mznp4al+/aQEH5kmYsl4Bgd0exWt0boDGSx9DfirZ7mCxhTlnNtvZcGvVqwGIHi2zNN/TX1Nfji379eXNu2PPpr2Wt+XcxxljFu5bcXPlxpsSpxnds27n0YE71w6dOdrpxZlb/09eFTTQxeWfp4/+HfwA24hxZ73efTV6pPeJmr8p3f1L+I7JVxV96zHHXXoH4recff35B2BoAiJFoILZeZZfT/utNNx0D54XIVET6rdgZhfmVOJJGQbVoHsd8jfehxiOOJl6CiYoInv+/VfdXCHGWKFrNvroXY7v7VgZjCbKaFqQSTKJopK3tQdeixoiqRN5FBpoIY03DpkjlSq+uOWPnDkpUoocbUgcmGlaKVKPSZL525heOmjAagZg2dMCCugZ452Z5dkUSM4pQKhygAYmqAIJNOroo5BGmkBClFZq6aWYZqrpppx26umnoIYq6qiklmrqqaimquqqrIqaaFuLEv8g66y01lrrAbHaqqutfQrwalmCGnDArsTO2quwxRZ7LJ4JIZvsrgpEdyhSElEEV0rMmSQWSdp+hS2D3Wa1gJGo+RmnlkDSyaCZIZGLnJtFybmduiRy2ZO785n7pLzKsXsRmkz5axG+A+p7JpTxCayVveeu6xnBEhocL7pl0jujwg9BDKLE/yIcIMYOnXgwv1Vp3BOc+1I8p2skI2VyTiiPrPK8LM+c1ctXwttxyyKCPJbHEPrMEM5vcpwU0B4K/RnDKTs8F9EhxTyx0xXXTLVYUF8k9c42I2jxkg+Lx6PRC/Mco9IAHyRyu2If+fVtaDMt89VfZW0V2WAhzV/ccZv/nZPdEG19tN9P9t21y22Xq/Pgh/f8dsJhMyim1fXy/fjHkdc7OWx093t50JnPuHlwnXtN+cWe/cqVoHPxiXfIvjI7qGuGuqb6VItOq2CrvPfu++/ABy/88MQXz/vtS+X6rK24nrW8rssG2uywz9d6LPXVG9us7AY0oMD34Icv/vjSXjuRQtembZBHuu/Jfvp6V+nSl4A/JHjZjZ/9uYdScli/Q/fLG+EOZjgG9W9N/2tIAGE3wKIUsF4HpA2bqDK2xeGvdDXaH38iOJoJqs2C9nvdzxrYsQfOiIOQ8eD6QAhAEdIlfiqy3OlMg8K/JDAuLlwaCY9mQhoSSYVdYaEC/3OoviDKkHMQ/OENA7DAEeavSRqsUg3VAkS3jA47TyxcFFU0xcgssYkv3OHCehglJSbuXVts0xFJl0T6nTFfQgxjFgmYRqZ0MSpfJCIM1bglMsbnjqTJYxx1OEcH1vGDgKTOGws2yCJqpo+HXGEiayPISBoRkjMsoxslV0FLPjJdmfyjGTnptlBiDpRIPOEoNddJU4IOlWxU5ZeQBxTWXatXW6KlTXJHu/b9SXaMkpQwI2W8YhrzmMhMpjKXyUxM6XIlixqfNKXZgGhO85rg2570fIW97BHget7UXuy26axwRos5CSBSAxKSTgc5kjXhqgq3fHmSb9UrntSKTjv90/+rfU7pnYJZIxYxSBR9EqmfNGmUBjKgAUfVBqC9EWh9Khc60/gTPAiNSQIQwFEJCEACHK3AQ/cYMEymEmxPQ+dB2SmTCHAUAZR66QQ0CtG1ldCTrFGaQXOU0Zc44KUwTchLIbDPSZH0gxItEEHvpVKeshQmFQBqTF9qgbXUVG6GdGXSKnqbi06npyEAalAFINZ2GlWMefPjKeGyU34+NQEQEOtUXyrS91w1qVla6t+a6lYBtNMCYh1rYB1g16OuEK9d0utJ2uqejE4gsHN9KWHPWsibanVvXI2PV4nT06gC1aMgBWoE/nNXk8YSpWzla2OfOoCfAhUDEsBAWUlr2Ev/wnKgFE0pgzZLm54OwKWBBWpVaYtW2Kn1land7Ur9qtHgDrWopb3tRFGn23rxdjS+HQBgnUtYq9b2k1U7KdwyG6DrQia7cA3uaGn63ZyaFrfUTa51l2teKkY3vKcdb3VnVF+1ZHdN912ZeCG3X4vS90sBptmA14o11WKUtf9sb0DfO13UiuWZKhHUMDfsqH5yeMPaVNRZ6Pmk2nEGwxzhJWeayeIWu/jFMI6x8FC8EeV5s3lNCaesoidibur4m9P7MY9h1SzvYROb5dvW+UgcEnvOCJ9EmSf8ojM/B1XyslVCrJAMuErRtXLByBVwfkW5SVaWEsxbla5S22jlRUas/5ESjiiF1yzLNpNScTidsJrzymYWXRnNmN1zYvs8pT+PmcEKPjSEJqlRQ8PXwol+tCbtbGY8YzmGc+Zznf3s5o3Bubg/Oy7/umyaK1ZYv/iVNJkp7eUzKzrNqT71qjl9ZzRemo+C3jKh/dfpk+kR1C8U9QZJfRtT0xnSnru1HYkdH2NrGtmmA7QUmR0gZw86vmJWdYAYHZ5awzHPcs51w3aNwF7D7NeV5SG41SRBR8sa0c
mWNhd/SOOD2HJbuLQdMJlcFBNfpt4GWdSHQSzjghv84AhPeDMBXhAbZw/HChByiInsYx2D0+ITB1aQLZ5kby15ytnit0WkvK2rQnlP7v8+Nqqz/e4w183cOVs3Vi0rb1zLt9WWrnlJxd203N681F9+daBjrXIC/7zYQde2y+Mt9CyTF0LW1jW2I91yWDfY24yUuZbHPfWbwbxon073GLX+dA9FnevQzqCykVp2FyW96kNnedHhXbKvRw3diqXj2g/b9iqdveddj3bTMV1gpLta6Van+tyXXnesv5nsPJ+bz69eaVvrnO2Rn9rkX+54T0Oe6M9eOecr/+292xb01057Qe2uNbxvnumIj/vocW75wdtc7qE3OuVpX/rL8z3zXMt7SBhOkHt7K98n3ncv9b1NfztH4dCPvvSnT31OEX8gwQKmxMfZ43J68+LmzPj/6op8ZCQzp+PiMrnIMfK+kse5JdSG0BITDHu4T7vMM5r/+2eubtO75YAO4AAdkAEdEIAOcFFVxBr6B2xLI2z3FxOuBVovtV7dVi8LKHZpJXNSkl6CJVmNdn6sxzjCR3O2t2wx8VgcFVkQ8IEMcoEj2H++F0TtsVEvFVkIMFM6AoKdJ3Wqpz/+xxrtAVwpKFSzVSQ6SHqLl3j1l4TD5lNyRYSilYMtGIIX9HqCF3sP6Fk1CIVAlU4JKBguaIVqF4P/BxPbtYVkJVZEZYRTuINoJ3pLmHvbthZx9VISKFY4+IW9EYaBN4YliEgn+FqxJVZ1xYYWSIUChIHGpYEyoYXc/1WB+YeIDKSIocaIEOhcHEWBhhiJbgh4PQhFPygY/SOEgeVPenhVn4d7qTdpa1EBEPCKsAgBOAiJpsGHn6hFodgb3CaFh9iJkteHjpOL7NZB0WGLcHiF9jdv+FeLkuhELzh2wrgiU3J9ApF925RLyscZzocowDRww1R94BiO4jiOLEaNAOBw1QNx22eOwdJN2XMsRlZ+0iR+uLNx5oR+8vRx7hcdJ5cTJFFlLKJ+15Jycsh4CrKLhcWABLmKulcoP0R/jYeEjxeNcYOQlPWMGdOMOBR2GAk7FgmRiOOLd8eRYigiH7l/OqWRTOR6wBgjJ6mQKglGhNSRP/OSlDg0Mf/Jkrd4MDZJkzgpkq1Hki2ZJD1ZkkwFlHcjlDvZbw+Jkn0XJm/HhE5HO00Jk0gZODp5jAdZlTcZAAvJg1ppklzpk16Zk0oZli45lka5V1cZQmfZkMpRlEO5WGaZig6JYE5ZeM0WlQWphOkhl0t5EV/5hnD5l2o5lyIxmJ6IlkR5mIE5MHVJkVsCmIx5EuzoQq4DZ5c5O9rojcJEj8kzYq5BjqRZmqZ5mqeymapJTtzTmqz5mopSjEfIjG3Yi7bJibxxm7SJm7y5m715G7k5m74JnMJJnLX5m8Epf8UZILJ5nMNpnMkJncipnM4pnc8ZH9E5ndSpnR7SnLppndnJH97/eZ3MuZzlWZ3hqSLjCZ7YaZ7byZ3pGUTr2Z7oeZ7fSZ/xKZ7uqZ/1+Z7k6Z/5yRTzaZ/wWSUDSqABKqD7aaALyqD92Z0J6qD3CaD4WaDq2aABelUY2iYHSqEIGqHyuaEf1KEQ+qD8CaIjKqIrRKInaqEciqIKaqIXKqMzOqEeiqIaSqMvqqMxaqMgmqM+2qL/WaJBmqBA6qI9iqQpyqPpeaRDKqTs+aEVCiuu2X2wSXGreaUax30Uh5pe+qVg6qWbGXE6po4Yx6VbmqVWSk7uWD3nxCDrFxH66C1O+qQS+mRx6hDjEplkqIAquqJdqZi/aKc1qqS/d3R7eXjJuKNF/1qojwkRgqp5hhqiTHqouwd0iiqVjhqlN+qXISmRnheNLHqnj5qRbdlCbymlqtqpsud1pzpEqcqqpMqpnrp6r7qRMjeqm1qYn8p7WSeqf0qpa0mXt7qSsUqkk+oWVgmqvnasUDqltNqqEemrE9mnYBisyhqofPqHgFqpwoqYbFOsMlmn0TqrvGqrzHpuzmquq4qspeoQkRp8jcqoyepeelltfMmQ7fqs+zqViIqvmdqX/Cqru0p3vYqpOcet30qo9PquDbGZrYN8/1alWEqxW7qN6RGmGruxHAt9Y/pjZhp+aDp+I1uPJRuaJ1tL9vh9+Egtc5p++6erDXsb/VhP8f8qgvOapAy7c/8KdfkKltAatP1KeD1rdj9LmEM7s0JrkOhKraFqrXuIrX5KljdbhfV6rd4KXpdqeAmLhQVLsEprsE2LsLXntWELtjpbmYm5rWabtkvrrmobrukac8CatVM7rGsrrln5tgMLt+d6lHMLdrkqtViLt3LrtM06uHZbuOAqmGy7qG6btJHLtICLuOqquDm7pIbruHq7rl/rt5KLeUXrdgGrr2iruVcbbqPrd0e7mHzLrqdrqbNHtr2nsNm6uFFLtY+rqWcLurF7elubqF0Luai7s8UbtxcBsbekR2pasVpKshi7Ox07vdRbvcr0sWWKjs8zZGlqsSSrmm3/ur0tG2UvK0/k+rqfyx81ixJVm4i4e75iW7m0+6tQC7+/q7Wzy7VlS7zdmrn927iQ2bmYm7ogObb6W7tte7zo27vSerAHTL+2e7cEnJerC5WlC7Sh+7/GK7uuGrgjOcAbvLDIG8AeHJQgXK7p66/BC7DDy7uT67sw3MAGLLz768IKnMEi/LdsWcJJecIL/MIyLL8PXK0RzLghfLubS8KWS7f1G7OEq7or7LMXjLT3K8FHbK8V3CZ/N6gozMCwW6tCTMMIzL85jMNIDMCQursC+8V928YqnL9iDMEJrMFdDMRvnBWbyQAVpMdiwQCgqbIpu0t/LMiBDE02BRF+bL2K/7zIjOw7zdu9axrJzivJkByb2XIAmJzJmrzJm3wnHsHJoBzK9hvDbGygwhLKqJzJ47sn5Usto+zGKawi69tk7TuJ7+vEuivAdeu/ZUy5O7zEgrvLE7yswPzBwnzFRjzCaazLTSyzdky0UWy0U+y6ZmzFdSy60Uy6LbzGsezF3cyz2cy608zFP3zDVaxnWUxB4yypyJy7vIy/HVzMJnzM10zHOkysPIyVnuvN/PzMtwfHLFzD3NzP5kzK0AzQUrzNpmvQBG3P8fvL80vEc9zLDO3P4IzQ0qzQGHzO7jzBKcnMRdzR7VzAYRzQY2zDDs3RJA3RQ/y0If3KpXzQ8RzRLv890Wc80hR8YbLDx4rD01mRyI/8vd5rskKTyI181Eid1J8S1CZLjdZIyUL9vE2NJ/yYyqnsySVh1akM099c0OJ5yloNyqvsj60cZVzd0BStvnnaEHsK0jZtzeWc0mDM0nEs0WR80/Wc1nc8rTSduPQc13ot03zd0n7dzE+80vgszz3819WczPect/nslj7c2CKtzKYa2ag62Sqd0xitzQK90LCM1nj92Ifb15fL2JtNzKbNxC+Ny++MxeFswRpNxRXt1TH9zzNN2Kdt2Les2rrN2m/t2JSN2JCt2Pqs2bUt10FM1yYtx3cN18PN2bld1zX93MKd2trq1tZd2YA92g//ndirHcy8/drXPdfg/dvi3drObNuCjRR5vMdwA
dRDjbJMjbJFrdT4nd/6Xd+A7NSDbMiFnGH/LeABnmIDnmJVHdacjNUKLsquPcznB9YNrsrMsdYMUS3os4+H/eCmMcsX0daYDavIHdoWjc2dLc6zTc3YfcsfHeK4itrJHdi4PdjUXdjqveG+XeO7feO9nd0ubqwjftslzsE03tx2jdIyPuRJbuLTbeTVjeTeveJJvMw/Pq4cntdRbt7FHd7GPN4QnstVvrfRvd5E7sA6DtzbfdZKDs9FntCfvdExnuUkzuRtntFvTttzrtxCTudm7uQ2HtzcPeY+btySDeN5vuRl/z7Dfr7jgK7m7D3jfe7mJz3Qaw7dm93ihJ7Zhr7net7eRPHeR+LTVSHfUk3f8w3I963fqr7qiszfhOzfBV5jBy7rsW5vs27rtR5wZB5EE87Jjt7poq2AEt7ru+4WFj4Sv47oyu4WHj5yxc4aQd7VwH7RTb7Z0R7s5b3c593d0L7p0r7ssH3i325F3o7tge7L2x7d117p2b7XkR7n3e7lOJ3j6l7u7H7uWl7aWB7vPE7e+K7tW77vgrHuj37vh0zlE0zw017wfK7o1m7vDL/wiV7SDy/vAv/v7u7w8D7wEC/x4I7OsW3uYi7lHv2UIr/PBn/lpM25/j7yG4/x4x7u1f/+8i5/6HLO6cAr7iev8B8P85Cu8TbP742u8tGtNK4O4KKOFKQO1VNd6qg+F0bN6lI/9V969AQO61Zv4LnecLcecF3fcAne67jSG2Kvycl+8zEPhsM+4WNdT2XtPkRP8h1+7CCe6SLe8T1P3Pp+5uk99M/e7p5O8XY+6aCN82h/8pjO5fNs8dwO+D8v+J5N+HAe9I6f8omP3l3e718+5Zdt9y/O+IIO5p4P5Hh/+JZv8uqc4uQc+i3Pfwiv+IsN+nJv2fCqxoWf9j5/+vcq6c4N5ZZO89Jd55Hf+5Qe8Xkf/O8+/Ede/B5v+sbP5smP4neu4sD/91Cs86k//as/+43/f/0zr/xPzvzHb/0H3/mwf9yl//uU7/3CL/2Sj+eGr/7xD/JfAeo9Hd9fX3z5j337X439f44AIUAAgQAFDR5EmJCBQIYNHT6EGFHiRIoVLV7EmFHjRo4dPX4EGVLkSJIaDQBAmVLlSpYGBJ5kGTOmSwEwZd4EQNMmTpk6eeL0+bPnS6FDaxad2UDpUqZNnaIUiFRmVKkrqVZVeRUrVAFbs3b1yjUsygQDzJ5Fm1ZtA61b21Z9Cxds2LhS6yK9W7SsWr58FeQVCpin4MFzvRLGifimYpl70VqIEFlyBAtq/xp2i1nuWACMY3pmCdqq5qqOzTpAkFq1agdpL3MWnTK2/2zSdmvjvV10Ntm0EFb/ruB6d+fcP4cfL248eWHOpif8hj4B7euxu5HDXp44++Ltjc8mgA4dgmPqdLuHPo8e+/rq6Vc6FhEeemWz5Q+7p83evP77zc2Clw+6vezLrD3+CjQwwbD2sgA6DCTAYL76rttPwf4sRHDB/3xbTQIBJPhtvAnxy69CEy88MUOvHHtuNYZ+k27EAzfDECsKU8TKtApcFGi14M4i0EYSiZuRRhyNXBEt1FR7kbXpblQxSiGLtM0/tCJgssfUIhBuSCKPrLJGJKUsLS0AEWgSAdMGCHJMN98ME0zcrFRyggkYsrO1LqnUzUsoyYwzyb4Y6ovNP//hnFPMQAFFak20MhAgg0LbXBTRPvm8VNFGC9UgAw0mPbRSSwPzs9TmEkA1VVVXZdUEorwKCqtYZX11q1mrulWqXJGiqSRffwU2WGGHJbZYY4/1dSdca6X1KFuZXVaAAqaltlprr51WIGy33VYgBcLatShlp9SUVOy4RbdabdNNVyACDDggXnnnpbfe4TA1V84+2W1XWn65FaiEBRIimOAF7i03XxTv+xdgfxu+VqAHCqYYIYT1VZjRfSGO+GGOqZWYYA4uILnkCyhI6OKFNVbu3I/V9fjlkBHaIMCUvcS3ZYyVexnmnkEWYOKDGAgQgREsxjlhnVcW8udsY/54ZoOzOCgaAQYOUpnlpbUuzOkC1v1Z6gA+qBqBC7BOemfm1NbOa7B7FvuEshFAuaCsyWV7qlLdhppjqWueGwSD7h51a7wZdvptmYMOwIOIIAzvA7vTZrpw7VwOu2+IQ54IxN84GJxyri/PW2+++Z7YcYgg/+3qyTnLee3KbTs9ccYPKnsDtGFXWvbRuas9c6ENAtzm3XmfPdHkN84c9YRACLDu0JFfPuPDMwse7tsPokC+s5HmLCAAIfkEAAoAAAAsfwBoAMIAigGF/v7+RzyKAAAAAICA0M3cOzs76ejp29nn/QAAAXp6yNnbSkCJQzZ+0eXlK2lpsrG/yRoaricnUFZWLAAAeitkjDg4ABUVb0dHqgAAAFVVwhMtthc2kiNRZDN03AwWMChcP19fph1Dqqm2vrzFu9LS0w4hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AAQgcSLCgQYECBCQYwLChw4cQFSRc+DCBxYsXKyY8yJFjQgIBQoocSbJkgI8mU5rc2LHlQJQqVbqcORGizZsSFVZEwLMnzwgaBcx0CTNmzKJGU7Ic6lEAyKQlmXasebNqw5wUG1bwyVWCwwRLpRZECpUk2bIiw4p96RTtyLUGqVqtitWhA65cIWQFKxQuwbNuAaNVC1ewUb9/dc6lK3dABLxcLzTki5jtU7cjDUMlvFazzMoAGi9+WJchCMh4KVIG7dlo66N9K7+Oylrx6IhUE0BAzRXogNWy22LOLHx4SM5iZ5MEHdr2bYelL/DG6+A3cqnKVxY3fp1pdpHMRT//H1BaAl4MExKqT2ghA3DE381uH959aPwA4Z2PJy/3sc/0661nwXt+3ZfWfJjVRxOCUOWX1X782XaXT+gFyJ57ChLF4GAblpVhS/c5COFVjW01nWQMEVhYh5uxmNSHU7mYkogjRvhgAtPpNRmMTV3GnYyw5ecjWjSOWFpD0qFW3Y6xwQekSgYe12SBTy5X24P7HZnibnhVEJSQxh04ZIJTrjhmg1fWaGONKnZWpXZncgjmcEVCqOV4bSb3ZklRnlSmm3EelmaNdz6XJ3Z7yhdoi3NiVmeW4t12qHeJErfoi3/qeWlKBlRmwEQYhSqqqFiNaqqoCXWK2KcCKLBAmH66/wprqp4mJCuZFuaq66689urrr8AGK+ywxBZr7LHIJqvsssw2m6yqfrGqAAHUVmvttdceIC223HKbE7RwsWrAAd2WW+235Jpb7re1CmBAAwrEK++89Mqb36vGLXBAQvgOtwCPByV0QL+Y/SvAwGEanF8DI+YHawAEJPQwwHFVKubEmSKqQMO1PRyxABg3+uOmQda2MYQOw/pxyLWRDKXFUuZ38n4ph7nyrBlT6rJSMPspM8fBeSwxziLT1zPFY7UKtJNCg0x0yxjvrN3PKHes8tBhIp2Y1HwenbN9Slcd9NVOZ/31glwrynJwM49Xs3E3m100mWkTRzXNVtuM9Y9z
B//m9d1u5w333kb3zWHdBwL+3NvDxc031DgjHrPJS1PZ9NpOSu6n5lqz1fbigjdOOJmGe/g35WIzTTbmVHJ+OtuVr3j508G57rritzGOmeOFQ2727ajjPbbeZT9ee9Ssr/h57qHvPrrfpbcIPOypW7467Zkjjz2Vy48G7lqswrpAThh/L5a0BJN564/mSyXtqfA7K//89Ndv//3456///su2z9S26uqWtmwVwG6xa1WpSlcBr4WuBWLrgNFKoAOrda+E7UsA6UOLwmqDsHwJLINl2WBwOugW3bmFd6Tz3ciSByjjmBAtKISeCo02vezRqXknfJ6cZkg3FmrKhTiEoQ7/TRc9TNWwdUAc3uCK17vjRc6HiKrbC8sSwx068XdQ1FkSVUe8LILNdl5E2xat18XtmUl7cuMhmpQoOiam8IorNGMLb8hG57lRhnCkYRg1JMUgUnGIjFLj4fYIokpNESpVJKIgTXfEM9KRi0skZIzAKMcfPpKMkaxkFNFoPBs6yo+IBCSmiuia13myhKBMSiIDmccealKLl5Td9dLYSr81co6frGMO72jFU+rxlV8coyzLSEtfurKYSIylm2aHTEdiEZhiHI7/hhK+hJEPZ9OcCfpwtj6jZdMl28QV/8ZJznKa85zoTCc6v9kScbVrWhM8F60Q6C4FxrOB8aQWBMOV/yp41euf88pPBT14Ng2RsGAfTFjnECIAhlVvRYcc5SKlJ8mmdG8xAk1lKUkJm1vqqXsOCKlIQ5qVjOqyl8l8ZjPdtLwJdakhJoVkE41py4oGLGyT4RJklhRTTM40pXFc6UcfYh7eQIAhPR1mJ4H6S6Fq7CvT4YlXkrpMaPKRkz9VnkNMFNUEUFVPNq0YJZ1KqZm5NKoV+Cqiwpq0sS5VqwzxT1R5olZKsXVrWH2jkzaWAQvkagIYwAsE6gq2u1omr3jc6wD8qqsJ4EUChF2QYRnq1qyydAB9/WtgfQKUyGposs2prF659xC5KmkAni0kaPvE2oIW8nNnfSlqaxPRjf9O1IirxalWeEOR1MYot6JNLGkrEgEIGPe4EPAKUmmr0ZLVcpBWfW3s3FRb59IUumQF20Xn4tumABexKIWr8JxU3Zdx9GUefSqE2NkRd9KzfO8Eoem6SSb2cuR98DOVOvfL3/76978A3p99DwLAfA6wVfnU5zwjWM8EE+Bb/gSohOO1YH42OJ4D9dcF5ZsUETrpoG4xmENHNCmwgXiNMrXjd7mpphJHM5cp3uWKzbbdxbj4qsoE6yzfissf1XguNy5kH0/6R14q8rnzbfFCQyvMqhKTx5aksZJdO8km6/jJlo2yj6d8XisROZRGZuV1k8ymJYeouSpZpUSR3KIfWyX/yFXO8Vp3nOVNsrjMVO6RleeM5dE6c8t47vJb0JwSNduWzZhyc1XgrGc525XOfu6x0RR9E0bfdMgxFmKY1zzmNnP5tjMitEkMbV2mqu/TiA71l1UpykN3OtGofrWqM13kGQOaxGY2pKhLQmrzgho2lLaJpcW650f3Wbh/nnSsTY1in6o4unGWcqB/TZtVG6XXPBN014INkWG31TgDNkg183VNs4W7IOH0cX7zW2Hw2YrDLwuwvOdN73rbW53nJoh7GZxvgezbwv32d7vPN3D3FZw5C+puxRS+cOYGB+FUYnhiJD5xh5MX4tS1OEQ1vvGHcxzjGqI4Q0U+8o/rCeQn/zc5pUjeHJYv+eIeh7nMI67ylaPc5jGnec51PvOO3zzkNU940IW+c5//PEYkz7XL83zToxO95ykvesalHnWnNyXpTG841RHFcqhX3ehgn7rXuW51pA+9kEtPe9mvfnazbx3nY4f72iv+9qfz/O5h//rcS153oPfd73EH2961HnjA413sh9f73pX+d7S33e2FPzrjI+/4xkM+8WufPOblnneyWx7lmu885xEvesFHq10BB8C/Ce4u1LuenqlfPaXuTfva2/72uO9V7AmY4APDM5/7dHfrYf96fttzgRku2IYVmvWkqX2E8J61s2UMbbZ/vunU9jKtwZzb58vaJOUtdP+rS116u5cfxzCevqa7/3jrf7/a22f1pl29edNfn9iOLiykkU36/itey0SyaySBbVPTfth3f9+Wf5K1f+Hlf55HedGGStYWEwTYNd5Xfy8mgfF3bePnawhIdxDYaOmnVG3Efh/IFtoWEuE3ah2YbQZIeBiIfhqofrVWfQcYgjfIbEmxgrzWggV4gnyHg/g3gk6WSdllfg44egBYFjw4gD5ogS/ofFGIVwr4WQx4ZEIohUDIZFWoWlcoZjFYeVlIhUR4ZUYIZQ8YhpeXbEwogCNRgfJxgecnZODWLuJTbuxTfACnh8JHXy2Se4AYiII4iOm0ewjWewV2TwdHTYuoTY3/CE6P2E4ShGG1EX0qoS/8wnxymIQmZonwR4PcZ4MwOIdrKGltOIFp9oRxOIUoyIqUVWz6d2wN+H9pSIoiOIMk+GxHaHi2mINs2Gy5SH27KIZq6H46KCioKH7zR36ciIS0CEtlyGdnWGdKWIvNKIMBmIwsuIweOIatuIVnpo09yI0uCI7NB4LHGBNN+IaqaDeu2HLvGI4bSIHtmDjxeI7f+H7aB4ryZ4LeGITp+Bni6ITk+IP/CI/miGn8yIEFCYXmuHQKGYzrJ4paeJC5FpFFWIIUiY69OIqmCIwZqYtoWI0kaX/6OGgDyY4NuYoPeY8YaYYaOYyl+IwmGZDSJ5E1/yiTxtiRFWmTJmGId1iPUmKIRMmHrOeHmEKISrmUTNmUx2KIvxdPvudgwcd6RUl8sHd8BZR8IbZ8BLWJNLkgJ7aDbigScOiOLZmQsLiAsoiFxeiL10iHXfhbX8hpb+mRYYmNpziPqbiSaGmR+AiQv0iWKWmWQukz9wiRa2mFbQmGPMmRcRmB2ciXyuiPd9mTg4mMlLmNlvmY+eiTn4iToaiTcJmXxJiZ6liWIXGW9piWgPmS0hiTI1mTnimYH0mYmzmOnRmZO3mbmrmQ9OiXrQmYijmX3lWX9FebCPmai+mFjWmXyhl6qCmQuUmQu2maM2ln0Whs0xhp2Nmb33lpzf9Jl8+ZnLxZmta4hCAJkyJJjbR5nnipnW5hiAxANvVpHAwQie2ln/fFnwTmn+J2H045oARaoAR6lfyGlQmqoHvIoIMnMAcQoRI6oRRKoZ0CoRWaoRkqnfCJmdQ1LhoaolypQV7pLxwanvFJKWP5m6LZjxv5mdEZmFy4nbHYnfyHoh6Ko2SIiyEpjLPpjOkZpNDIo+zpo+4JpCWJpMFknDfFTD/Kix0KmeqJm8DZl9cppO85pSzaoxNJmimapViagZNZpZX5orapozDqm6lZmKt5mCcaplCqpWtanSp5pUkap/I5pi3KkHYKpncqlzTKljY6i3B6mlF6WExaMU56pHj/+qfZOaR6yqU5+aSGiqZnmqd7SaacaabLeZlSiqnrGZvt6Z2F+qh+uqSBypiD6pYxWpyp6pyr6piHeqmOeouRWqRdSqmmqqS8qpdQQZ/2GSb5OXwLWqwNymB1Y6DKuqzMWnsIeqzC96z
RapQGR6yD1xwhmq0ReqEHo63Z+qa1ip4rB6LeOqEjGkIlilAyCq6nKpaeuI97Gpx92quVCqpUGq9WyqnsSq+Sman4WqZemqOlCp72uqW4OqmMWq/h+qVi6q+SOpq6SrALK7CQ6rAHC7EJu6uN2q6+eq8P66IB+6kTK7KoSqSiaqSkOrJpaqkzarLcKZsZK7Ecq7Ale6sn/5urMSuuM6uxgOqyNQqzKbuzMsuvtmqxN4uwQUu0Oqu0Q+izggq0NzqwS7uxDRuqLzuqUauytCq04pmoSbOoSUu1YkuzVeuxFwuyETu1ZDu2/Wq1P4u1hKq1nRqjsHm1KJu1XMuwbFu0bvu0cMuqszq3gXsfwEo89zkcwyqtVkmt/yOgzfq4kBu568S4jGiti+ugl2us03qtDFWuGsqtnquh+7q3aot05Bq65woVmIhBmpiY97iic6qpuqmv61q7KYgfqgkxbmq7rptq4Je7rDk5rumpiPqq5Bmr0Dm4vOu7ofmxfEq7YFmwsfuvmxqyKyu1etuzNmu3OBu2a/u9PP/btmZ7tBjrveE7tKTbtNv7tncbt3lLse+7o+vrt+0LuCw7utprtNyLtHjLtPDrv/Krv+zbvf2bvv9rwAHct6oKte4LwNcrt/Iou9YJvb17kuABvLsbvRCsa2yqu8IpvMTpkuN5nOXJjPe7vBasghj8wYg5vHQ7wk2KnCaMvQcMvnybFIW7RIeLGYlLuY5ouZlrYckquURcxEYsP4pbrQEne0rsw5AIxE3MuYKrJ6G7oShMvFubcKfruUv3riXhYVg8xTScGLBLnWGcwRV8xhwMmPOKwA9cswLswAzVxjZcujdssGPMFnR8vnbctcaLvrCas9nLx4N8x9OrvHsMyIT/nIB/3MeKSsEurKZmHKOJ7Mg1LL54vMGVXMiWnMDjq8mQHMLMC6+IHMpqfLtLt8mXrMiGPMmlbL1Z7MaxnL8KXMePDMtivMGofI+qTLJy3LLzK8vN0ctv/MsRTMmm3KrZh5JsnMzKu8vmSMyzvMiebBRJ3LgCsMNu0cOYG8Way3pDfMTiPM7k7CvXXLlLDKDops76xs4DwcTYnHojVMUTCrr0PKH4S83FHHJbXK6p22HpGmL5zMqdzBZlfJPOK6/OfMLQTKeGycIDXdDT3MoIfbbPi8sRzcnqG8cL/LeyytAi7LWJAbYFbMurLNHAzNGB7NHJC9JqKdJ6LMPdeMoh/93It4y85unSzAnTcyzT5SjKO23TX+vTBknTLy3UI03UDgnUaly3A8y/DSzMGc3ITtvR9fvReTzRKH3MCZ2vGH3FL8zTw6zULMnUYY3UMV3CM63MR13VK33VLZ3VuRy/xevWx8vA9ivXU13Nh2zRCv3VGgzHv9ou2rxLhV0W3PzN3gytwhfO5fzYkD3O5/zD6QzF8ezEkmjZ6CzFcy3He73PJu3LkgfWOm3UB2nWpc3Wp23aqs3ayonaek3aqb14sq3Lafzarj3bui3XsG3bkcyyvU3XWq3RJ03cCPfZnS3VtS23wW3Myy3cyf1zyI3c0a3Pww1x0/3czt11uR3bgf/9y82t3N8tzOEd2qBt3dVN0BiX3eNt3td93Nrt3um91fEt3fWt3sWd36It3+WN3tRN3f2N3/vt3/eNbpi9n5r9xJOt4AfenwkeI5Ed4RIOuQue2ZVtiAWQ4Rq+4Rze4RmeEB4e4iFuKw1eEA1w4iie4iq+4u993vQt4jC+4SAe4zH+Ef0culqDJePRAOxdGzRe4wLw4zCeECPgxSQBxlKh48+RE1iXH0Iu4jP+5B2eEA+gEh8gAB8wwy6h4xcQAV7+5RGAItDR48Eh5R4e5Wau4VRuEhugAeqhAXBOAUstFlgSWz6xJCRS4MbNFmnO4Wje52tOEhzAE+rhE3Oe5F//oVOCRRpk7iR9LuNB/ugfLgBVPhIM0BOF3hMhUNZ07hBFxRvKxRBM7oq5JumTbuoFEOgisQGYnhBcscM5Phlz5QFZMepAWOqm/udpruoB0AE+kek9oQF/2ekMwVUnkudN7uO5HumSzusl8OuuzhVyDsLEbue8JeqNTiWorutmruqDDu0CgBceMJzEniQUAliPhe16rt98vuzbTumrzhXA7hMdQO2I/huKjgDp4Vg+oSMRkuxl7u7LXum9zhX7jhcbQO73PgCfTujR3hOhbuv/iOvNzuyPzuus3hPosVk+cbix3hCmNe++oe4A7+gCX/EEHxJzxQHDvvADcFbzjuf/p07qtfvu757yAfDtvMHpLj8AXAXsXjLm6z7gTWHzA18SHsAb067wTIElOOLw4c4TWCLxWEzxF2/xgA7vJEEBqCHsPN/0EOEAEiAB6jH2Mk/yNO/kJ3/1OB8SFPD2cP/2hf3xNqEejDH0Ln5TRo/yd6XkDOFXFnD3Ja/ta5/1bV/SBuH3mNUegp/2yl7xN9/363YqJODOAvfgCI7hhb/rAiACsJLYYhEQACH5BAAKAAAALH8AGwDFANcBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5/0AAMjZ2wF5eUpAidHl5UM2fixoaLKxv8oZGU1XV64nJywAAHorZJA2NnJFRQAWFqoAAABVVcITLbYXNpIjUWQzdNwMFjAoXB9vb19PT6YdQ6qptr68xbvS0tMOIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAEIHEiwoEGBAgQsCMCwocOHDxccSLgQokWICxIe3Lgx4YGKF0MGyCjgo0iRJDmqJJiQwYCXMGPKnKlgpc2EJ08SwJkzpEabKhMS6OlTwFCiFn8C7SggwcynUGsuZYrU4k4BVSEqnVpQaNaHXr823Mp1YEKnUNPClFqWJVaxDK/CZUi2bVi4d8XWLXtWrV+2bRG+hSt37l6ueb8mznp4al+/aQEH5kmYsl4Bgd0exWt0boDGSx9DfirZ7mCxhTlnNtvZcGvVqwGIHi2zNN/TX1Nfji379eXNu2PPpr2Wt+XcxxljFu5bcXPlxpsSpxnds27n0YE71w6dOdrpxZlb/09eFTTQxeWfp4/+HfwA24hxZ73efTV6pPeJmr8p3f1L+I7JVxV96zHHXXoH4recff35B2BoAiJFoILZeZZfT/utNNx0D54XIVET6rdgZhfmVOJJGQbVoHsd8jfehxiOOJl6CiYoInv+/VfdXCHGWKFrNvroXY7v7VgZjCbKaFqQSTKJopK3tQdeixoiqRN5FBpoIY03DpkjlSq+uOWPnDkpUoocbUgcmGlaKVKPSZL525heOmjAagZg2dMCCegZ452Z5dkUSM4lQKhygAYmaAIKNOroo5BGqkBClFZq6aWYZqrpppx26umnoIYq6qiklmrqqaimquqqrIqaaFuLEv8g66y01lrrAbHaqqutfQrwalmCGnDArsTO2quwxRZ7LJ4JIZvsrglEdyhSElEEV0rMmSQWSdp+hS2D3Wa1gJGo+RmnlkDSyaCZIZGLnJtFybmduiRy2Z
O785n7pLzKsXsRmkz5axG+A+p7JpTxCayVveeu6xnBEhocL7pl0jujwg9BDKLE/yIcIMYOnXgwv1Vp3BOc+1I8p2skI2VyTiiPrPK8LM+c1ctXwttxyyKCPJbHEPrMEM5vcpwU0B4K/RnDKTs8F9EhxTyx0xXXTLVYUF8k9c42I2jxkg+Lx6PRC/Mco9IAHyRyu2If+fVtaDMt89VfZW0V2WAhzV/ccZv/nZPdEG19tN9P9t21y22Xq/Pgh/f8dsJhMyim1fXy/fjHkdc7OWx093t50JnPuHlwnXtN+cWe/cqVoHPxiXfIvjI7qGuGuqb6VItOq2CrvPfu++/ABy/88MQXz/vtS+X6rK24nrW8rssG2uywz9d6LPXVG9us7AYwkMD34Icv/vjSXjuRQtembZBHuu/Jfvp6V+nSl4A/JHjZjZ/9uYdScli/Q/fLG+EOZjgG9W9N/2tIAGE3wKIUsF4HpA2bqDK2xeGvdDXaH38iOJoJqs2C9nvdzxrYsQfOiIOQ8eD6QAhAEdIlfiqy3OlMg8K/JDAuLlwaCY9mQhoSSYVdYaEC/3OoviDKkHMQ/OENA7DAEeavSRqsUg3VAkS3jA47TyxcFFU0xcgssYkv3OHCehglJSbuXVts0xFJl0T6nTFfQgxjFgmYRqZ0MSpfJCIM1bglMsbnjqTJYxx1OEcH1vGDgKTOGws2yCJqpo+HXGEiayPISBoRkjMsoxslV0FLPjJdmfyjGTnptlBiDpRIPOEoNddJU4IOlWxU5ZeQBxTWXatXW6KlTXJHu/b9SXaMkpQwI2W8YhrzmMhMpjKXyUxM6XIlixqfNKXJgGBJz1fem6Y2wTe97NHqet7UXuyu6axwRos5CiASAxwpmPk56H3bgqe32CmYcFVlXOgkUq/qxP+itZXQk6xRWnTSmaN9GpA0wqyNP3kIUMEINJ8FpeeGFICAilq0ohJQqNwM6cqkhc40BPWPQdsYkwpc9KQRiMmkNvrPju7to7cJqXtGKkuYOOCkJ4VASFe6RixikCgD1adEVzQACeD0pBZYy0LH2NDePJRBMgUPTX0IkwgcFacE5SkmUwm2p0FUpEP9jgIgcNWTVuA/S82bH08Jl6BGdI+IhIkFyopTB7wnrbBb6yvb+tWZhrWqOMXABDB1gQxoFZY+rRxM4xPV6UxVky8h60UHm6kLHLZqXIXbYgPUWOI8VpQ2PalgCWtYvP5Mrx71KlSFCldJysSkdE0pWlnKUJf/Vump9eosbT4bIClRtKwQUKlpX4jal6o2t6zl50vmelW7KpW2TLVtDDcLId2OhrcQ6t9Yj3pW4UJXrU0dbgDcCtbWBnGS3u1pfRR73BlZFzLY5d8PxUtf816yvSBN7kG/VN/i3pa6HnqvX+K7wfl+N6/hPXBDnqkSQQ3zwY6KHqzOAmEIn8WXSaodZxjMEV5yppkgDrGIR0ziEguPwxtRnjeb15RwykrCE/YV9rwJThfDGFjNyuY2tVm+bZ0PwyihZ2/sSS15ikvILVml6FqZWcghdr01ZVElpcvHJxdovw6acpPZurItZ1fJprkilLuK2VhSNcuLjFgj7fvJMic2/8pT0rKZNWvlLGFZymne2JrFCF4q2xHMtxHzldnr5jFDFs+kVFyC1TtoOPsvzyfTI58R7Oe4bpKVpfRyagvd6DMjGtOKrvQKGW1nkn56yZmes5M5XWpHIxDSMJN0IVuq6QJfGtWhrvV/69ylO8cZ1jlb9FZV3VtAx0fQrSZzl4n95VuHmcnM3vSy3+zpXycajaK+L6t7bWprgxrbup4ur4Xkaw6h+CC23BYubQdMIB9Mw5c5t0EWVeEHm/je+M63vvcNYnkXRMXZY3ECXEyAG6+umzZGuDm3R84ZZ++cDHL3RaqFvniymTVEJgq3JG6Rb9Ur43uSM7XpvG1yExprwP8u2p5nXdtwV5mv14ajsMfdsJPXLeVRk/VP9ZftNqM85oyceclrjjr8BhraI1/1tA2t9JsDXc1CX3qnSf7zb8u85wEddtK57HSrBx3rDtU607l+M5xrTec29xzYnQpgFyF97HuVerKp3nVcgzvaxh1609JeMrPfbeU7h+LaxStyuEubZi4PWNurhGxuF13vc+M74p6u56gjHu+7hrnXoT54BRPX8mWnfKQBL3nHdR63dr964j9Iascru+7PTvXW43752R8+K/4mSLq9te4Nt7uX7L4mvJ3D7+Ib//jITz6ncj8Qawqf4AbHncJpPP2H53jH22S4oqLT43levJ4cx4j/ke+ZZP5+nze9WSKSPb+0uKH3uehnjvrP398tHdAB+M8//qO60vgzaP6Tdlr2NxM3dVQZ5V3+Vy8AyHLRxRn9Y1TNhYAJaBoLGHhaRDsyYVXAtVNCxn1+d0Glx3MOqFKSVVZJNVsTSIEfKEAM2GcjKFd0ZVFZ1YHyt4IM1IKU9oLvEYMW1V39l4LxUYEhKHg6CFs8iADp9INACCFC+HhyJyJOkQEXgCkTgAE4pVN3dX4eKHpE93pqN4JTmCkTgFOypYRLyB9N6IWmM4JSSIVWeFEHmIVnyIQ26EQWSEcYGBMQWFbOJYdziIZ1KEd3yFE6+BIgQFfdhYJ/WCVpSHdf/3gZ/WOEWCWBi9gmjdh0j1goNCEBENCJnggBIaBRlagil0h2awiJBjaKlhiIhDSItIaK5qeKTFGKtJeJyvF+iiiLQUSLt3eKmhiLscF8AuF8iqJHwggAHgaJ4bcwwVRvkqJ9MaZ80jiN1FiNzHSMAFc9Agd90IhjMsaN2IR92tSNB/eNNtZ94vJj8BMdIJcTJOFO/UR/JbGMDoFPXBhspzeAwOiKA8OKYNSKQ/gkuOiH/AgRhTd1mJgeA2mGTgh7Ryd7hpd3sPhO8oh6sZdrmCduhTgl62eRD4mRtieRv0iRATg0/oh2DXmLqYiDDXGQc5eQCrKQHbl4YfJ2CGmKMf+5kgWZMSdJeimpkDoZkCLhkq7niEC5j0LJNveocqB3lCTJkia5lDnnk2qYk0j5k5O3eZWXj3n4lDvpEERpclgJhUE5lvfSk01plV6ZlBcRll1olGoZjyU5XmjJlRvJITNpdMdmky+Jk2R5lVUJVHW5eqOmj2tpljmBjS7kOmummLPjgM4oTOQofY95GdZ4mZiZmZp5Ko7ZmeTEPaD5maK5fTX4f6Z5mgpYmqmpi26xhTPimq+pmrHJmqwBm7dhm7cpmypIm4KBmwHim7+pm7nJm+knnMGJmrO5mspJm+tnnMe5nLsJnbrYnMgZncl5ndbJmtQpnUHonHRYndMpj97/6SHA+Z3cqYrbiZ3DeZ7mqZ6ymJ7ZuZ7u+ZzzOYrwKZ/x2Z3gqZ+8eZ/8WZ/kOZ6ASJz+SZ/5aaD4maDvKZ77iaD/eaDtqZ0Myp4B2qARqqDoOaEAOqAUyqEb+ocFeqEPiqEi6qAZWp4eCqEV2qGMSJzFaaEpSqIr+qGk6KIhOqMqGqMjuqO4E5rFOE4x5pmjGaQ+Go2beaRImqSb6ZgD52LbmHBASqRD6o2d6XDVA3H1Qo8NQXFayhAeR6OrCKP80
Y4nYY9aOXoJhqI66pdnKZVnR5UyuqYlWot956Z/l6YC2qJf2ZKDmZFhyqI1uqdReaaxBqc8Oqc4CpeCaaeB/4OSOaqngPpyVZd6X0eYu5ingcqW/cioIWSoJoqoctqLWUmpnGeprYmpf4qYJ+GWewems4iqiqeXAdJ4YvmomeqqrEeTbUKrb3moieqrIhl6hIqPplqbsPpBc8mqkYerK6SmmTepF3l3IQmpzHqpgkqXnNpCngqq1GqrkuqQewmRN8mtt+qtsap5pLqVxdqbx9qsUImtw8qUnees5aqobRqvU4mnYtqt9pqYstM6vRdvRUqlA1uOw6ccSpqwCruwxcekBPekCxelBDul5Vil0IeO96SOFkevqWquH0SmKKGsUxOpr7qvGgmtHymtEcmvwMqybPo3fTqt9dqyM/uyq/8asytbs+Tasf16s9k6RNv6q58qtHQ6qtGqen5asiSLrO8qslxjsjxLs98qrOmKpvPartaqqQaJs+NKtF4bqsFap/j6pvq6tO56rU7LOFCrtNXqc+A6q3xZlFLLth5bmLIKIbzaqnWbtW2bdXfrIXm7rHt7qmubq3/rduLalztLt3H6rG+Lt3Fbq42rs1/ruFR7tJWatExbuGertTz5szgUtGDrsotruOiKuaWquZ3bt+yKtlyruJVLuaN7sl/hmABrjAVLmUL6owebHgz7u8AbvMvksE6ajc8TfckzmckrsRXLPVZ6vBhLLRrrfRzLuHP7sV06EmkLgqz7omarbSj/G64gmbNRO7SzO7ViW7WFWrbdm5enm7JIK7PlW7qrG5j3qr7Eqrp8O7jG6rqgy0SOOrnzG7voa7Twm7nya73mS7qiuqhje6dXy7n727ND+bpyu8Cyy8Bha8Diq7Jde74KTL/g+7iAG7m9isEDDMKmG75wm7gXLMKE+71ue7kHnLoJvLky3L+eC5YWLLnXW78CfK4sDLku7MMoHMIELMQkjLjj+8Ea/MQZTLs03MHxS75IrMJADJMOjL/yuq7e274VqasUVMQnDMM6DMZK45gNUEFrLBYNoLy1BMe7JMfQRMcNNlxvLLx6vMd87Du7K6WAHMgT+6N/DCzseACInMiK/7zIi3wnHsHIkBzJN5rEOPyawhLJmJzI0atx03tkGgrG7Ji9ZsrF+RrBOdy6O8yn//uPk4zFE6zF94u66qq/MQzKTdvDZUzJWXzEK7zEjGfCehvElcy/YXe4v0zGwfzDr8zLdvu+VIzAVjzMwrzLRbvFsmy1XtzKUFzA1lzDs3zD1KzLy1zNsezN2EzLZ0zMX2y/MLvKAazMtazOhIfLyczM4yzOM5y+17y+pmzL/vvAjSq623zFA93LU9zCTQy7rhzP0zzCB03ECf3C+JzODZ3PHIzQHqzQBR3OC+23zozRVezEUZzCG+3Q+mzO/JzNnyzP7KfGbAwXeVzIzUuxlP8pNHncxzid0zr9KTKtuzS9vD0N1LmbPIecyZjsyCVh1JmszSNN0KR4yUoNyZu8J51Mfitd0RQdHyAbEqO8z/kLzvfc0aisqhXszgLd1NIMzx49xCWMzIKL1WPN0h75zDYczRxd0gwNy+0M0J3KviydrPT81mod13DNdsZck249sv58yob90RCd0RIt1utc2PNs1n5N2WF82LsKzIJtz3nt2Wvty4gd0UZsxoQ92I3N1kwM2aU90acN2sXs2G1N2rks2UzNzeVM199s12GN11lts2XN19p62ajtvqp9zLRdz6Y92cXNftvLghL82ctd2cINtMQN28wN3EpZ3aF73dP/ndlw4dJH0sZfEdNDHcfM69PXZNM73d7u/d5Bjd7CSIyCPNOEfN67VNRR3chDtt+QfNtOTdJP7d+MPNXuWNVFdtXNHcrX8tw3GN2//d1zDdLQLNICfuEBrsQPPdusXdu+/doSLsYfFLiK/ddYa9HdrNvnDNbS7drZTc57Tcpk288m/s8yDsEqXb1prd1tGdgljtk63szHPdodrtwuDuAGfdIqntLoDOJHDt5DvtmJ/bSMjeRCLtpSntyd/d1BbtIX/dghrdFofddj7uUpTuF1beEZvuMhrtljrOU/vuAnHtobvtphHtkf/uJPPuFgXuFijuFsvucivkIkTuWLjcaD/x5Eha62Va7gPG4R4q045J0V5v3T8o3fdex5N/3enN7pwRvfc5zel27poQ7qmY6NXe4WBM7IVt7bZd6aUL3qB5DqrCHKrd7itj2PDT7nguHdTw7YEN7rNA7ktxzsveHruQ7sjY7seW7cWL7mhD7scm7jLM3srx7hDZzb2P3O2+7oMO6zyy7t3a7sYGztgE7muB3j1S7uXF7s4Z7jvK7nG3zm323u0O7q6Q7u5c7uv+7u+w7vxu7sdd7sZ33u+H7vKK7t9c7vye7v6w7wjU7uD9/k8k7wEo/Z9h7ouZ7GmH7HAjDpVVHp903qpy477O3pKJ/ySmrqHj/fdtxhL59iMf+PbjM/bzX/b/q96kgt64p869ie7LG+6gZepgiucT7v5Lm+1RPn4HYY8fFO3Tce0BmP7ghP50qO5rut5hpv8Q6P0l/N27jO9dTu9V1M8Udf8VJ89X2e5n9e9UhP8HzO4Xfe2g0f8M7t44Ze440e93bu53h+7W8P+Kn97CPO2XE+7k9/95bN8GKfyoMa9X3N+IIv8Gov935P942P6G5e+FPO6Icu14luRYaf98Ru93yP3EW+5f1u+qHPGovOvXqv+bLd92z/9wYf9pOv+NwNwAXv9mh/+1aPFJH+LiCPFCJf3+o98j968irf/M5fjSwP86Je6h0v/dEv89Of34m//dz/b+y0Hvi+f/Zn//2/v/W5T/40aPfd7/Tev/6x//6Fjf7fJ/7e7uLy7/7wj9r3r/74H//9j/gAAUDgQIIFDQoQYFDhQoEIGT4k6BDiRIoMJVZUeBFjxIQbD3b0yDFkQY0jS45E2RAkypMbW7pcaTJmyJcYa6aEiRPAzYk8e8706BOi0IdEdVoEmjOl0YVMMya1CbWi06MkpVKkKhJnVq1Lr/6s6lUnV5Vjvw49WzQt0rAs1zZ9+9Hs3K1x5baladdqXbpi+f7Fq9Sv276EARsOHLVw3sWMD8tMLBgx5MeOB0+OjLaxZMqXO2MOaSClAYSiUZIWYDp06dGsT7seiVr1/+rUrRHexp1b927evX3/Bh5c+HDixY0fR55c+XLmzZ0/Jz7bo2zb0jFSfy2gwHbu3b1/344Q/PjxCBNUH81A/Xr27d2Xrcw5L3n63cXXr4+QgIED/f3/BzBAsgRSYAADD0QwQQUZGHAnvQZq8D78yJNwQvAQImGBADbksEMPPVygwQIVJJHEBCJ8ED6vLKRQOxYvFOCBDzn8QIAPZuRQRAUtkKBHHyWwQMETU1TxM5lehBHJ7xCS8cMNNLhNAykp+FBHBB1AIEsttXQgwSE3U6wuJb2rcEwmP+Qgy9u2rJJIAEY0UAEItqQTAi9RjA+mMe1zcc8CzuywAS3X1FIED/+tNDACOheNAMEv85zKzTKVnBRJQDncYFCE6GygQ0QVWHRRD+B81LOgJO1z
z0pfvDSADrYkVEsNPHUTzgpCXTRIA0sFTb5T/QwvVTNj7NAEWDelk8oNrcQS10VH5NXIU80C9k9hKSV2wzSPFUDUHGs10IJFMZiAtwwGiNYyaX8FdlUWL82U21A7WBbcAeSks1zeLkAXT1PZ9dNdCy99dctyJ1h0g291glPRLcnd7YJz0532Xz3bvdbSbDeMN0tyMVi003oZRlACZ7OUwFF/e7044IxZ3XjDk7PkgFaSD2zW2S4PpNjXMFfEuNpWA9jW2UPtPfBWXCu4E1VIIxX65XeFY97QA2eVXRgnOON0dut+nbY4qqijbrJDCnCd9eibr4ygbbcj2FllsFkWO2iMyzabAr33pkBkm7UuMXC5wYyUWrtdxhtHxQNAVPASe/553ZZVlXpgqhefsXHHhVxZ8rpdJhtzzEVUoHTTT0c99RJgo826irCLTeAJZccPoRFEV7wB1qcLCAAh+QQACgAAACx/AGoAqgCIAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2ef9AADI2dsBeXlKQInR5eVDNn4saGiysb/LGRmuJydMWFgsAAB6K2SRNjZvR0fBEy0AFhaqAAAAVVWSI1G2FzZmMnLbCxofb28wKF0/X19BIk9fT09/Pz+mHUOqqba+vMW70tLTDiEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wABCBxIsKBBgQIEKBjAsKHDhxATJDxI8WDCBBArINjIkaMEhwoSEghAsqTJkygDiEzJMuXEijBjygSQcCHEmzclCphZ8eJDBx2DIoBgc0BIASNbKiW5cqnSlzyjzqyJs6pDnVIN+nQYQWhQCw2PJnXKsilZlzuzqqVI1apVrGsRCsDYUIJXoTbFni2LdC/auIAHtnWLE27crUYh3A1agaFevyfNQmaaNvBhhYSrGl6L2MJioQ6MSp48GjJUy2oHZ364OfXcukIzTEhIOyEGDY8nUx6r+zRqqapXN2ydFfEAxR1n166NIXfvvrop/+aMWThr38VfNwTaUfZy27hL+/8Vvxf7dJnBrRMHrr2hxs8fHZM/O5+s+fMw0wtfH9W40c8QgFSfUwMudR9+bFVn3XAHTtVeQ57dFVpYBT4FXXQNIliQfqvxx5N/iXnVmIAXPscbaZVpGBOHmXnoIF0LylciiiealqKKPSkYo4voPbigczRGJx2OK+q4II9FwhgjkDbWON6NRGplpHoZJhkjhTM2KaRKUEZlAGAG1KTAmGSWaeaZOn25Vphznenmm0clsICQF82JoQBquvbdnnz26eefgAYq6KCEFmrooYgmquiijDbqaEJ5qsVmAgRUaumlmGJ6wKSZdtppmmBCeoCnpFqa5qilkgpqlAnaOdkCByT/5CpkC1RZ5AGz+lWrALhGtyurOW5JQEJb2ppflk86WV6XwNIkgLDE0slsf8guqyx907IabXTDPiutZRW2FC5fzVrkLbfbPgdutdhea1+2UaY7WbfFwtujuwSy+265UkJ7rrqBjYsWvgbai6O8kNH7bcD65kuwhfxu+O+8CD+57sPiNlxwxARVvJfCd15cL8bkciyXv/WKLC3Jf5nsLMoLAyYwSjNHZrCKHp8FMsAya2whyzTfrGHOZO2Mosp3Am2zyy8LabSNSJuYsstEO/W0xQwrbVLNWwuNYNVLXb1s1EHGzDHYSomNLdlamh0x2i2pvW/WI09tMtwsyZ0v28na/332xAnj3XLPWpfEteFeFxRpVmwKuYBO9S4u1aS5PimntJJ/+OjmnHfu+eeghy766IxmzhOnqXq66UWpe7rqmqK2/mnssmP6+tsCVH4WrLL6amyOveq2a/Cv/v414H7pvTHdK/uNO8whM5+08/wKjpLyEEsvtdvVI/+x9UtrX3b0d3uvM/hd820t9eWiXxL2GavfLvvNuk8S/CUTXjf37ZtftP1ckt+7Cjek8kGPZ5chIJcUaDz8ABB/g0vg/sj3twMeTXxto+DznAbABkqJgT6LH9X8Z7UOJu6DE0TgBtFFwuXpr3n8q18L02ZCATqMfsB6YA0x2LcY5nCGcdvhC/+n50NtATFvQpQgDDXYPQtCjYfrK2K8jpgSCAbNhgUDIZhCRcWTPM6EppsJ5aR1uTuFsUikS6Ma18jGNrrxjYI6Y0zYJEeYoK52poIUFw2AKjxW6lR+zCOeGJCAQhrykIg85LrWpTuy/IphxKNVQiKpq4Qw4EoDWCTDcKg5EIJQSdbRZM846SBP1guUDHGAKlepyqJkkmHrIuW9UogiVHJHKBFwiCgvI8simZKMEOmKhBqyS+r08li/vBMo7bIYojCkmK45Zo6S+RwlKQA5iwHLK3sWSynOconVdEiEPoOAhUAzO9JMEDVrGRZycqQx52RPOs21ThvB6D3uLGc8qTX/TxSCk50awMCeJpABoRBln5rrp8TqabkBCJRPExDKRxDqIIV2LIR8+eQAAjrQgnYkl9u8TDeZmB2GLguUwlzMhCjaI4sKBqMDO+VDPvCZEYWUOiNVoWtMii1U4hMvxITlJr3pS1raEyIKiAAElspUCIxAl0IdJVGRadSGXomlRXKpXHj6LlSuBqvH0qqzuJovr2YGrMGa6jSrelJM1rEidORi5LhYxmo2sqtwyutR4MjXvvr1r4ANrB7jckc/rm4ugfzjYGGHpz76MU2ETKRkJbtY3N11KbzLne9OKDFK7mVXl8TkUZimwy6Gb4jhFK0Hp1NasR6OMmbNzGhH6ESs/6GWnarl7G9aq1Z1yjS3pDWtSax4WiUqE5Oi0S1qeEtS9mgUuLTloHARh0ULxZYwszWgdF0L06Bd1y3ZreB2e0vP314pvCvUDXHTB8WeIhe9TRxvc6n13POudrfTvV8SqVPfJd13ufkNwHqp296uvve/OaVYgBE8VvP6V7kJDtyCIdxgYEJXuyzkbn9/xGCpZpi8/jzuhcX74fl20sEcprCH1bvfnaLYOvDtX23HVuCyHli5b6VI43wFOczR9bIFS4Be9VpZ9gj2yEhOspKX/KccHySuhC2yl6Q8ZTzt8crlQ6u5tLzlqP6NyxIDc5i9jDsxv9TMZyZz99DsLDa3Wf/N/WMzg+dM4Q6LFM7UcjOdgyvnOvvZzdzEs+b0/OcR9pnQgL5zoBXNaJwK2oiHfrSDEu3oRVe60a6hdKYlXSREc/pghcZ0djyd5U/niNSilmGkLX3pVq/Z1AlCtavjDOsus3rTt6Z1rvNca1un+oer/vWge32ePe+a18ce9pqwzFgnK47Kp4O2GKUtEyjricnYzra2t006Zz8bsYk9LKUSezvGUXuO57ZjunGmWeHFqt3FC7WwJx3dEut01MQeM4ZZPGFZ45rE/Oauv2Us33uzZ+AynPHa8n1RTaOz4BdMNr0lTiTmGhzZ8+5RcBU+N4p3muEA5vjeQC4Xh8sT4k//9PixTM5PlNs24x9X+dACPOACwnzlJB+qvSN+81PnfMUKFnjO7WxMl9NY5rH+OS+NvnCk+3rWCWd6x3u
edKcXm+YtxrfV9Q3woIOY61DHeHq9buKJU/3pk5Mrj8HIbEmtW8dvlxK35073uts9UN4mSGHxKO7EEqDck4u7QaztdiuXD8gtySziWfLIrTdc6UXfecrPLiWWJ1TqI3d8mjUf4eRl/eBDV/HSJf/ysCub8qzFer9Db/mKYt6FqH8853Ue8K/LPvab77qEhT57okfz9dnrvbzHvnvb5970Zie+51cv/Na3FPgibD7kf0/6o+O+5NN/ePWbfv03zx7oxS+7/8ZZX+/aiz/m3fe99s1/8dMjf/z7Jnv7k//v94Na5LC3P86/P3r281z/Pqd5eTcQOyY8PWZGbWduhtdsl3F3DviAEJhtAygQe1c7fUdugvdtEwgAhKeAG8gwi5cSirdZCCd28bV9UweAVdd94Ld8vJd+ohd5/jd5Koh2Wqd7Lmh82Cd98Rd+8wd/MLhx0Jc/QZh9J4eCmVeE/CeD8vd/9feEN6h838N8SsiC/deENAiFoLeE1DeDpaeFJhiFJ+iF1leDlWeELYeE+QeG7seGU4R/wVeFZph6cBh9cuiGFad6LziHtyeF50OFfHh8eHh/QxhBgbiDVsiEPuiEYtiIW/8oKVzUANIlibrRABmod5dIgJlIgZsYgZ74iaDIVxvYgYG3gB6YgKU4ipu4LgfQiq74irAYi18ySbFYi7ZobDz4NyGIEiMoPLh4hwSnhnF4iN6XiF2IhV/oiGH4iH74P4A4iPtnjOuHjGUIjQEojUdIhtxHjL/YjCX0jMrYhuH4hoV4ReTHhdO4iFk4jkBIjC04hXtojSvojWHzect4j+IYdcJoh9w4fGNIjdsojzbIjP+ojslIkOyIfvRIQ+CIkA6Jj+S4j0TYj86XVeVYXBSJhpcnkYYokGeIjtkIkCnokWBHkvhVhxNpkoKYkDoWiZMYHZZoiqcYZTKZig0Yijj/mZM6+SiqWJNVNoGk+JOoKJTlY4tG+YqzyCtHuZTdqJKIaFm+8m676EVNyZLXuJBB1JAQ2Y5OSXsimYQZCZJpqI0jaZXz2JXv+IfxaJYD+UMo2ZFs+ZHYOJZfuYZxWZJ3eZIXyV656I5XaJDVmJcr+ZBG9Jbm2Jdo+Zc5eH7RGJY4CI86WIyOiZVIpJX5uJUKWZCL+YOZKZhPqZmQyZhX2ZXqF5KAGZCeKZmJqYibyYiESX+pmZbOuJavyZXBSJZgSZr+eJt1OYy6iWY9SYn8JpyQEZM9eZxD6SA7uZzM2ZyAgpw0CZSrGJTR5pPV+YE9s5RHmZTaeZRVWZudKUNT/2kSvRhvJXiZhbmXBIaYsamYocmZjfmb5deb/CifYrmRuGmX4Bmf7cma7+mamMmf+zlzhomR9jmX+EmfKZma3+mW6mlzk7max3iaZTmgZ9mfE9qa62ihbYmeEZmfvsmgu6mPIFqfIlqRYfWgAXSOCOp6HHmYwMihV1egfBmjAUqgKtqgHmqbJKqgcCmjeAmkevmiBnqiGumiJbqgQjqYNzp4LslCxOkXxpmc02adVTqBzpmlWpql0MmAXvqlhdelZQZJ3XmLI9qkHTpFjESCLOqX/UibaCqXYyqbuWmk9wmbpnmQcRqke4pfnReYS/qZGCqicLqj4QlsXrmhfcqkhv/Kbn+KmoGqmoMaqfbYqKM5qYt6MpGpo3iKqHSqn5kqqZEqfIXaqZZ6PI9aoaHKqXmYqnVKqWcKaYmqp6eapjzqqe4Jqasaq2o6q4C6qyh6qXn6q7UqpxJKqJvKq7cqq58aorAKnFRabQkRpXsxpdEZrehmpWi0pdzarZ4opjMZptiqbtqardh5GWVai9yZrrHIqodarKk3niVRnpKkrO9qqh/qozAaoZiaoBT6qsB6pM9HpDXKr6Oaq7OZrOeZr/8KqvDah8c6rGqpsG0KmhMrmhf6rPPZsM4asHc6sEn6ox7boiCrr0WqsR9rkQS7njb6sCGXo/YqoKHarFlZqsv/iq95SKMsa7Azi7DfaLP3erM5C7MLK7SEuLIQeqBumqH/qaguK6gH658XC5/COrKM86TDCZPTubXlSq5Y6q1gG7Z1B642CabhKq7XWmqc564y+7QVi7It26trG7NV67bsabVK+2pzW7RBC2rBxrN2G7d927Y4O7gz97d526+FS7iGm7F4q2vXx7Z1u7iHS3KS67iBC7hGy7iNa6t+a7l0i7mUi6qgy7ec66il+7ZRm7iPC7dzGrmh67mnW7l7q7o9q7mda6x2+mWpe7e3y7pue7my+2TjClebSJ1XWrxw17UJIrbO+7xKRrZEibaMVQDWe73Ym73aa70Jsb3e670X/zFCDDC+5Fu+5nu+UBu8AvC97Iu93du+7SsSfMSurbgurrQgDIBgdAa/8bu+/Pu9CXEC8hoA9otcSDK7CfK/AOy/Cqy9CfEA0VHAD1EBEVDBFhwB2nQV+utnDby979vB7isAEIwSHHABJnzCF0ABJiHBDRECKnUdELa/IBzCMxzCI2wSG7AYK8wwrnRNiwFSDBLDHFzD3MvANfzAJ9EAn1ECJcHCAzBOdxEfDHHAk9sjRFzEV1wASGwSHEBOlMjCCuBOzjTFG7wuWfzBRyzCJtEB7nQBJMHCP5VNQRx6Z2zEM7zFJJEC+aTCEnxL7mQTVCy6VnzFaHzHakwSOZxPHvdAwDzMEFC8Ed6xJxowAIE8vAlMyHYMwnjcxd2hHMsxASIgwWEcFJ78HRhAyWXMMHVcxzfMxsnhJyzMTBwRyctxG6gsxGaMyazMxZ3MJyDgxNh0F0B8y3Ssy5h8wyThThvwxo28HeTkSpWsu+yxysd8Eomsw8zcM/cbxx1BAjBczERcyJp8yCbhAYuhwtl8Gfc7yl5xv9HMp5pDzeFMziVBAXfhxk3czA7hABLQz/4sAROiwbisysY8z8hczxSQ0ApNAcTpxEeSyj0jz2l80Hvh0FQy0BFd0BMdwfq8IxB9GRJtyBR9Fj05ZHmFAsfLtSE9ziYQHQEBACH5BAAKAAAALJgAaQCRAIkBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5/4AAAF5ecjZ20pAidHl5UM2fixoaLKxv8wYGK0oKEtZWSwAAHorZJE2Nm5HRwAWFqoAAABVVcITLbYXNpIjUWQzdDAoXNsLGh9vb19PT6YdQ6qptr68xdMOIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/ABMMGEiwoMGDCgQIAMCwocOHEAEoVHAwAYKLGC9GMJhAIYEAIEOKHEkygMeSKEsqjMgSosCDMGEmXNiyJsOJBytk3CmhYEcBH1MKBXlyqNCVNlu+jMl04MykNXEWdLBzJ4SlP4MaRVl0q0qaUF02Hfs0bESpBCNU3WmBYFavXIHC/WpW7FimZes6RDtAwtqqL9/OHdl1MFGwegEsvYsQaeKbAigOTADh784Kkwsb1jzYceLFjAvmfYzWguWqDgYINnxYK2vPekGHdgpbL1q/OzFMwABYNee5v+
HWNit79ujEfCtnnCBgwuXMclm3ln74McPioY/bjjx1p0IBOwMH//c6fuvwsNgZa6/Ld4BOjN8z9oTuenN06eehpr+73mx7i/AphBEEPpVnlIFD5ZfUfmQpmFR7A5h2UXwXpeYWgkfd9xpisc3WGIfbSeaTchRiVqCG9tXXGYjEeWhQf2FBWNB3Ma22oYrAsYiei6I5aJOMBNEIk40pUmeSjvrxSBCMUAE50AUCXFAjhilRGZd1iilJG5JNcsdUBhdkMCWKK+IoHJc2JaDmmmy26WYCMxlgnQETvWnnmxMtQF2eewogJ2nfBSrooIQWauihiCaq6KKMNuroo5BGKumklDaqAAGYZqrpppseQGdknIYaapxYfmrAAaKmmmmcqKqaKqmP6f/J2gIHKCSrYQv4+KMAB9w6WK68+joXsFgaSYBCRuoaFZk5mkkemjUZi2yfWELmrHnMnlksdccKkCy0u157YLbPbitdt99WKxG52IqbILgsSesttdVa+ZW7GZrLGrr0YmkvSf8SBm9E8qZbL7vj4lvlwBAV3K91AYsUcUjKEszttPgxfBbC7yp8pXUOZ6zuxNM9rFfIG47McYYe0wXyxfOKfHDLAK+8sL6G8SuzvzbHRbPAOA+mc8ozf/uzxBo7hLJ9Kh9Ncc8uP7b0ik0bbbDUMF8NMdQ1O13dy+diTDTPXh9ZdsUNZ20yclwLfHbSDU2dY9V9vh00XAvM9O2fj33/qoCwOf7dJ9/bVWr44YgnrvjijDfu+HeXuiqqpxNJLiqsfSt0quWjat4q55pirhfgXtFqq3TEHtwr6gqtPivaD8mtbdF1a32y2jtv/bbdYO8rNtO0Z8w71mHHPLbuVq9tluzlBn+j7XUxjy3dwkO/PO7HA7q79WFJPy71zyvfPfbAk5187rcXz71/bSM9fGLevwt+keJDFX++ztOPfvTkU51/metLyv1u9r9mBdAmA/yY+WpXPwH2b24FPNP70uc745UPeQzc3/XU18Auba+D0Xrg7BZYPRC2JIFRw2AJNTi+WeltcKXiU8YElzHCsedxOMyhDnfIwx4uzoZ18Rvo/zYlOr2Y6nNDJACrkrgqzTFAAVCMohSnKEWVkW4rqeOZ63DVuisaBVgM0JLKDvig9j3tbSJy0RhNuKwPzhAmDoijHOO4mDWykH1u3FAaB0KVtWyEIHbMHtvyaJ89DkAtf7HQAAJ5Qe2dT48Gwc1frjIQRvqPhOF7o1uUY5m2LPJgZAzXIwtZEAmdBgECsSQEMak/SE7mlBjBjCpHqMJMutI9sMRIAmbZPFYCsE8UAROhdlOVq/ByehF8FhoHAKVCOYcnn+RZKNs4yhUFs5mCImZG/njM7yWzXd/aIyItY6Fuyu+bCQunQUBwGhNFc2vTbAnJzKZOg7xnLUsxJ/58af9AYFYkAhAIqEAhEIKC6JOA/JRgPXl0UAXWspWkVFJDU+jIDN5SjaBkozzNWDJN8giIZvnU3mIYGS+Oi4Z6vJNKf+LDlrr0pTCNKeIix0TKgYqJStQcSTeH0zg9kYpABapOY4W6WgnApELJ4ta2+CuFhFFLP7kbXIbWyEEulEdR7V3OfnfJh1pTS76BG0NQWLP5fRWqsFOaCHvp1cCBNavEq2A8WTJPJ90FrvBbKzIT+ixDzgavFNyqBbtaUY9iNa1x06s3+Yotv4YGsPzj4B1jxNEjOZYxkN2gXDVK18radSyZbeFmJ+vBq7ootPZT7DkZe9K3Inasqt1nW8902bv/vhYAZAUaOt9VW9DeNrdIM6tb0SpW3MYWobPtq2uLC9ynCZe2y5WqV6hKWKv6k7jS3Qp1V5ncxkZXq0LjKncLe9HT/hZ1L6whSVFayJWudKhsk6l850vf+roUpGEx1Zzgmxj97tdPO1XX1g6msgITWMCAOjDPDLxgBLNNwQOGcIIdvB0JP9jCFabwDTG8YQ6zT8MdjnCDRyxiEJfRw5RlcIlNvCwUd0nFE2bxrlx8YhrPWMY3XvGFSRxjHHfWxi0G8kZ9HGQdZ5jHOyYyRG67LhgnucdK3ktxmyzkH1dZw0xmMpWRfOQoS/nKS57ylo0c5SyLWctoFjOXPwzmh6TZ/8thbvOX5dwQLa85xXSGjJO9bOY9h5jMUMFvoPkbREIXGsD/FXRS/Btf+zr60ZCOtKNomkSbUnqIRTy0om3C6P4aOixITYnpjsq6M5vaz6mVrCC7DOUnR3a0q/5zq1n9asHOdWOoxnNcbc1ZXOd5zIEN72DH62paG1u0vCZtje+s67yquqrHZjOzEXhch85a1sVGtrBvHedfn/fZ1Y12s6+dalhDG9vifjF457JdWpJ73Nkud7JjLW1Aw1uz8z53vd+t7l1vu9fdnvaynW3ucKP74Puu9b+VnWN795vg+TZ4wtM98GCzW7zujve9Ee5AcBOb4g3ndwhdiPFnbbomnf824qfzu/IySvrlMI/5oy8NOkvjNKeIzlzOPb1zlffcLKFGyaiDXhKlijzkGqe2xzMO8iI7fOQF/zjHH370eFWbokmveNZPePWyevvU655qyff6dKQ33WJLZ2vVh0zn5n5N4GafOtQjLvWJyz3u8l44vTdu9753POpMv7vT1562tJOd8L6Gu9UNv9iyD37riwe82iHPdsWjXfKHp7yVLV94zDce8QF3/OXpHni/U13zseu6bjmfeNF3nvSTP/vmXZ96xq+W9qEHfWJtL1vdz5n1aiX5sM908pakXNMBBpTMl8/85u+Q5pyzeU9bvmjqc9r6KMc+S4hOkqGXOtenl33/7T1/e9/XGez+vvjwYy/4yuN+9+TvPepzP3/4wz7z4qd//u2vd33z/f/hp23qx21uhn7vB1u8h1zmp2dtp3rB9XXgN3f9J3EAqHX7h4Dxp4D193sHaFwJaG0beH4RyHUfiHUXKIINWIJeB3wFOIKRd3+fF4IMyIIYCIPlJ4PAFoJudyQu6H7mt4NvBoFhN11jF4MnOIMdCIQGuIA5aBYNADNPyBoNoH0scXwhRYURYYVd4nxc2IVeOCnFV4VY+BBaOGg/d4VnyHJpmBQH0IZu+IZwGIdy0jpxWId2GIQ0iIJENStGxX0jYXQ4iIcQN4F1V4F4Z4gvSIilh4g+qIMO/+hcQpiHStiDs/eDj/h2HYiElqiCq5eJTdh+r6eI7Gd6FgiK42eD8neEn0iKJJiBIKiKgmhxYrd++GeK+meL/DeAANeCkTiIushwjweLzHWJPNiLm+iKJoiLHHiMqKiBwkiJpyiKtciKwaiMHoiMK+iJsahwv7h3AUiNjYhv0miE1qiJIRiFFYSOgzGFa1h97Xh975h98dhZX1iP9niPgxKGWTiGDlGG7qiPEOGP8AiQDmGHBvmGc8grB7mQ2/iM1uGHIuF9r7OEgTiMnPiAeWiOJziJxuiIFwmJGbmKjBiK3eh/3ziSrSeLREiL5AiOleiR2NiJTNiQAjiLBLiMFf+ZXUbRbqOIkrfoktFYkhR4kkRZinknlIVYlIeolImIlIvIlOFojRwZkjR5lDa5izjpkOl3lcAYlUD5k3/XjK9YjiIJl
STJld5olD7Ji1u5kjeph9pokTGJkXEJjcE3lyBZl3SmjsLGl3DBjgTZj/womPMohoUZZviYmIrJhYHZEAJpfIPpmJHJEI8JmYf5EAt5kAmZmQdZlWuZlaBWVKczkXbJlklIjJ5plinJjWhpkmqpmmCpdHiJiTNJkeLolD0Jm6bJjON4g1p5mh9JmzmZgrNZjFRpmzXpllgJl7Wpk0PBk9P4mcwJk2KZjF+5m9TZm6lIlqkpgbgZnboJmtf/WYPa6YzciZxWqZxd+ZK/qZLaVYS+eZ6lSZ7f2ZLSqZFSiZroeZ9l6Z2tOZSvGaBL6Z/qmZYDeqDVCBV+OYsLuhWAmXw615iUOZmLWaEWCnMSCgCVuY+XSYaTqaEfuqEgRoecWYfdiaBeSWSjyUX7GZ7TyWcsGZ/j+aJkKU3LiZ8ziqMqGqPbmaP9iaIjyqPm6aMnymLw2aP8WaQmdqRDmqQtimNMOpZE+qQyFqXW6aTzKWBWmo3N+Ws2up6r2Z5wtqUyOZwh+aUGmqBYCmdS9pY66qJvCqVCKqVreqbwdKM/qqZwmlFgGpsCqqdy6qZ5mqJ7iqauCaRhWqN32qfY/ymmIUVSDWoUD5poEMpzEnqhmJqp9JWhIuqhHUqYnBqik1midbiZpBqHSsqe1giRISGRLJqlcYqod1mdXGqmwFmcqZqoMzqVeimJ+gmrg6qq6fmec3qle5qrs1qedHqsVNqU/5mUf0qo0XqWBXqogCqrjZqcxCqoyJqtw7qTZEqXXeqrwWmcvXqrtFqmjpqf5dqt4imdvDqu6Kqsxjqtugqvv9qRG5mvx5ml8WqrvFmfMlqnHRip4CoABisUkxqhlepzl6qpEBuxPRSqnzqhFWuxFJuxbEqjBCuvbNpn+qqoC+iuHMufGUmysXqtRjplKBus9+pjINuvXpqJLduyJ//brNKKrQgWs+fKhDcLrDZLszgrrCYrtEA7tFjGskjrpzqrpUp7tMD6syHro1Irs3Y6skvrrUWLtVA7s1w7tVsrgzWbtTv7tGDrohrLsJS6tmrbaBL7tnCbOGlrqSRVAHZ7t3ibt3prtwqxt37rtxOxsZDBAIRbuIZ7uIjLAGn2t4yLt33buI3rEadyqm2oMgzCFKhFWZAbuQKwuYyrECTAqgFgudh1MJ77t497unqrEA+QEh4gAB6AEqRrTxFQu7YbAZ50Iaamunuburx7t6xbEhugAd+hAcZLASIxuwQhSahxIirzu3nru9AbvCPBARMiIBiRvAezGJRhGX8EHc//C73A27niy7cC0Loi0QABAh4YIQIhobwRckrzEVbhW77S+7vUGxIbsL47EYXKCyCnQUn0a7rlWwD3y7v5GwAdkBEUghEaABLKe0+n4UmZ2yUFbMDka7/nKxIlwMDYmxHIO7t9lEsCUcFldMEHrLr5a70ezL4Z8QGju70DYUoYoRuFkgEmvCsonMHim7/728Jr0QGzC8AYwRyFcgE5vCw7vMPoq8A7wRzPlBEbEMM8sxQ0jAA2PChhksQbtcQFnMA/fBG6wRv9S8VbgxWc9BfuxMWd5cUa3MQgAUscAMEyTBAj/BdYsbhfzMPTu8EiwcKW8b51nBad5LwErMFMTBIfZ2AZyEvHVcwRk8S9eozIewzHIEEBf/HAgvzIBuEAEvDJoCwBigS+h9zDfIy/fjwSFLDKrLzK6gi/HsLGG+PGpmzJcAHLfzXJppzIrIHLj6XLfczLhkGx7qVSonrKCIzMKiwAIyAdAQEAIfkEAAoAAAAssQBlAHgAjQGF/v7+RzyKAAAAAICA0M3cOzs76ejp29nn/gAAyNnbAXl5SkCJ0eXlQzZ+LWhosCYm0BYWsrG/TVhYLAAAeitkkDY2cEZGwRMtABYWqgAAAFVVkiNRtxc13AwWMChcZjJzH29vX09Pph1Dqqm2vrzFu9LS0w4hAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8ADQAYSLCgQYMGBAgQeLBhw4QLDxCYSLGiRYsJFBqQeLHjxYwLHYocSbKgwpIkFR5YEKCly5cwYS5QyTKmzZgzBaDcyRPAyZ4HFRK4SRSm0KJIA/wEyjSozqYEjyYlKnWqzaVQoWJtWtWqUQFDvcbcmhUoWbNgxY5Nq/bl2bI73/Ls2pauWrlwUz7NaldsX69484oMXPKvVcNTCQt2ChdxUsdIFS82uVcr27YtIReVPHkgZ4eaqV7G/HlyaadhMSsdXbdyZ72NWd+V7df168G20aYmTRtw7tuMy4a+Ofzqb+CUY+9uvXw2ctjCex+Wnvj4c5/WURZf27z2ddzKVWf/pv44O/LTBrd/7e77O2jzetlPl189K0OmEO/jV5igJu/+4mlkmUIEFmjggQgmqOCCDDbo4IMQRijhhBRWaGGCCXikIUUgGcBAAiCGKOKIJGrE0YYedXgiih8J2JR/auXEwAA01mjjjTgqQJNqOa3EI3oDibdaAjgWaaSO5EWW5GbwOSQkf0UqIOWUU96IJH3lYalkVk8KQKSVCIQpZpgPWKmeW0uKxmWAXuJYwZhwSmDjlV1qyeSaqkFpowNwwgmBAjXSyaadakLV5Zc1PtAnnBYEeqZLj46H5382SrBon4AOIGieaRLXZEOHBgrBpXBWQOOmvBHq6aStIWoBqX06/6BppKupahyrsyFqKZwZTJABprN2apyta+FaG6IDKDrmBAJMwOiptEb76UGh1sjnmASO+Se0wnLXpbG+ITvAm2JmK6ac3BK73reGsimuAtgqJGaZjna7LpvgTifuAK+GaS4CstarLpoDQzqtQdXO+QAEEBDIsKlzSluwpO1yuq+NBB4pMbtNJVxkxlFujG/FlBo5AAYCYKCxvQRzzJTHOGqAgQYrT1yry0AdSuXOPPfMH4y5Al2bfj1daPTRSCet9NJMN31ghix21KF9JkYtddVWY+QiU0JblVNsPmLWY9dTfU1ya+ENinNPa+tWZ9s7wT0XywbbDCQAcsdF93h2H/9cUN7a7X3zyB0TblnffedbX3SIAz6S4/G9bXjOkzMlMqeKl5c2p4mf7RzjkmPuuXegq105T5CPdDlvmW9ZOuepg3p60YJL2/qdr6caO7Wzz9147yXt3tDqaI/eXu7MCf838IH/LnrhnBLdE0QAWkx2ddLr7fT23Hfv/ffgQ501h1vvF9H4FamIPvkLfUji+yNej5TZfAkQdltj/yjAjCb3r3x6tQvgxfpXo/9Rxnklu5EDFsjABWaKRgaMSgD7tq9r9YleEGReYSYIs2SRKmADiKBnOOiuG+3qUtsKoQYjZzqLzWlUsGqUCp93uNAlkF+wEhOgRIgdBLYqUDkUk6n/eEi8XNWIXEFEgAKISEIXygxBvurTn5joQyOiLEHOitMMWYe82VBwAE88UBTHRC8q2vCHNlLWBzNIQ6408YYDAAGsILbF4tWvisdyE6keaMYWwlFTC2OYICEQAhv1EXYlJKCRDqm7RCoSR4xMniMfacgV
qu6NaKRkJaNHNS/JT0nV+0/PRrkz8JnylKhMpSoJJD70TQ0q+VnR+DrkPvjZEkTl68kniUI/y9wvRgrhnyYj5jeCRNKIwyTm7Qp1xw5qEknLXFUX85jMekXzVtMMVzWtabzpbO6Pz7zbMam5TWh2c3HNnGQyzQk9LqbThdsM1jWLlU19xVOe59RcPasz/8BHsvNllsTNF+P5T8q10XICvGdB2RbQ9wy0nOJs6PASStCIHtRt6hzmQlEnUdQ4k5IbjVtHAfjQaoYUJePUZkWLGaSRHvCj/rSoO2uY0XCyFG9syt5OqLdL0SSAlECdUi7ntsqiGvWoSHWQTkuSn04ulalD5UlT3QPA2Fg1OlQNTv2uutWsJgerYO2qV0fIVcuU1at3I2tYzTpWtYqVrW9F603dCte6yvWsCMUrVdOKHb3Srq10daNf5wbYvq5VsIfNKl8XO1fDtpWxg9VbYSGbWLRMtrGUjatiMcvZyD4ns3bNK2BBi1jN7rWzlf3raFFrWtU+lrWhtSwsYRnVnf/WFiVTbUpu0ZLU3vr2t6hs5SxvC9WQ0Na4uiXuSHpqk16WNrauBehFo/vc6jJ0uoRNbXbbaUfobte7wXOpBD27wXl+hbzQaS1KxRtY0ao3vNiVrHbly93PvTdw6HUSex0LXvzm03X3Le98ScLE/ILnv7gLcHr7+7j9kta90p2pdWWrYP3G178VPnB9SZfh9xiYdxcWcIcnal40fZjEA25wiBc8YepeV8IQbvF3Dcqbp5Jkt0DB8fSUixvg+vjHQD6acLP2yuQi13w2HomOd8JcnDyYwgwWSYFTzOIXd1fG9I2yhWEMZSxjOMJXjrGYu2xl+2pZw2cGMZddTOY2i3T/xZc8sVbBbGYvizjNCHMwbO0sZT3LuaoIZiae58znLYfZzTMe85vXnGhEZ5nGh2azpBu96EhT+tKPLjOHBw3oES8PzmgudKg1fTxPd5rTLQW1h6kc50BL09RfhTVOOXncJItkybY9Mu2CzOte+xpD65tIkZHs1GK/SH9/jjWqZ83oTDv6y5Cus6IxDW1Se5PVo362iptdbW1XmaOqRrGs2wtubt9Z1KvecKmXTe5Jw9fc33Z3tystbW+3WtZTHjd/o71pdIt72flm976tjU6BP5ne/Z62s+W9bUsvnNrnJrg+9X3w9Yab0Aqft8XhfW+D3zTg/sY4Txqgmgbw+NYn/3cIrnGbcqf8+uUwB7KtH9Lyg6y8uDOnua55coCe+/znQA/6ASr+8KKTpMkyIbrGjd7wejM83hDv88VPHXKqSxzAHk82sx2+9K5HvNxc//rTO87vdVdd2WffutOjTvaMNz3h9s722GU3dbS7HervDjvevb73t5v97m2Pu5r1Hvi5p5vOcDf8v9MOcsDLfeOEfzzT+y51jkue74WH/Nonn3nO093yh097uzWfeLZfXuwoITlmTL5zqdYcIa8vyM0vGfPa2/6oOYd963Ode933XvaxH4jQhy90paPe8wdB+kuc6/jQNz/PdR8vtp0P9s1j/vSUN7T1j3996iP874Jf/P/zPw168Ydf5N+/NsX3fPUEZ3360C8/+k3vfdKDX/HzR378I19//eefwH4Gf//nd+r3fvgWgOtXYgajdaPnf8aEgAaIePdHf+aHfw8YfQ3YfRVofwUoegN3fhcof1YHgnY3ciUXfAMxe0qGggCggu9xezAYg+DzeynIgi6ocjaIgsS3gz5nfNmHfQ6hfC7BfCQofQeIgR9ogSWYfgXngT6ofaXngEtIgeTHfxsohUYogR04fiOohGoXhRo4gD84eNs3hlcYhiFohWLYeWiYaiI4hViYgQTYhFwIh234hRMYh0lIhWlYhmzIfYBYeWrYhXxoh3M4cRHIeBDohB+3iHX/mIVlt4VFKIdmWIV+CISY2H8AiIRPmIkGoXptwXo0WIO7x3KliHMDIoOquIpKM4o3qHOj2II5eIojwYM82ImaGIhBiGwCSIhY2HiTuId5d4m5WIm+uIlvCImPSIlQmId3yIyeaIlgqIvReIyHiHWMqHXA6IXQSIbTaIyGCI54KIncKIx3uI2FqIzV941/KI7duH/EeIbUWIzNSI7p+I5r+HmDGI7t2I/6GI/5SI/V6Ib7qI7lKFMFiY/WKI7oqIcICZAL6Y8CeRCgqBaiaGxGFouvCECs2JEeSSGuOIsaKZJQYYs7iIvyKI5C2BJEeJDst47OOI8pKZHwyI4DaZDp/9iQz2iO1+h+2diLBAmR/HiTChmUNjmRM3mTOimTAVmTMemOPMmQjhiMD3mUSYmUTSmNT0mTWRmR/2iVXTmUWNmHYOmVYymWTmmPDvmSTIiIP3mEyViUOPmLU+mSCkgxCQiU40iHVMmWqXeCtLiCgYlyg4mDhelyH5mYiqkgIXmYNkeStbZaDBiVXCkYKBmWc/lZfrmWlzWZl2mZm7mTfJWIy0iZr/GZaImZeimXrGmanYGamcmUoOmZoXketQmVo/mWpHlatDmZu2mXkrmarnmVpnGblZmasymcsKmcxkmUefmcY7Wcw6ma0KmbORaZ2JmRGMlbi9md3tmYIykABf8wnuRZnuZ5nuOpEOi5nuvJH8HJAPAZn/I5n/Spnux5n+Rpn/h5n0KxESYJdNEhTCaln/vZnuJZoOypECSwktHRT4qEJAiaoAcaoeepEBGgGg1aJBXwABzaoQ8gQ/VCoehJoCKangJwoTHBARewoix6ARTwEhlqQntETCVqniRaohYaExtAKjBaP+/SAaSCQcFSo+V5oyKaoy/RALAiAi4RozXSL2vELUSanxNKpEjqEhyQQ6rnpJoSRCkEoVNaAEZKoVcaAB8QRBfQElyqRrCCLmA6pWMaoWVqAkn0ok5qQUEEKG9qpVVao1e6o0nUAQHgpEgkJr2SIBqwp37apzjaeqIB4AFY9Ct98gFOCi9wwiwJggGK2qhhKqaOuiBZNCYcMKg+aiNQGiaHeiAzs6lHyqiteqGQCkWSOiYkx6UKAEOkQkesSqauyqso6hJBtAFqWqqVkkMPtKty2qvJ+qstAag8OqyWsS9s+iwhGqZxiqBl2hJAeikvCq1uVEEo9EBDaq3Kiq2OChMUcClp2qTEqkAS8K7wKgEgJKXk2qnZ2hIUkK/6mq8VyaXrdK0FCrD7ea9W4a8aJbD4ibD8ea5tYbA2Za/lGrAMqxaNWQJBFVQKK6EQOwKqERAAIfkEAAoAAAAsmAAaAMUA2AGF/v7+RzyKAAAAAICA0M3c6ejpOzs729nn/gAAyNnbAXp6SkCJPDpIRTZ+KWpqeitksrG/wRMtzBgY1+XlLAAAjiVUsyUlqgAAcEZGABYWUFZWkTY2tBg3ZTJzAFVVMChc2wwXqBxB0Q4iH29vP19fX09Pfz8/qqm2vrzFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8AExAYSLCgQYMHCggQIPCgw4MJFhYAQLGixYsYFQoocOChR4IRN3b86DHkRIw
oUWosMCGBy5cwY8qUOJKkwwQLAujcybNnzwUHFub0SdTngoUpk2JceGBo0acBjgpoChWqVKVYKS6cMKCr169gwypg6rSqUbNQCSxE+xRpVqULCbAtGncuUbdvUy5MELav37EC5NodnHYt4Z148y4NfBgx48YBEiu2uNev5a+ABUOGrFbAZsmTtT5uXBcy6NCVL1/OvHlz588CQi/WfLg06diyKTNUvdp2a8KvTePOLZo2Yd/HhxNPzbsv69+Hg98mXhH5YOt2T09m3hzsc+iDpdf/Vp4b+1zzbLUr5t7d63fwc8Unp14ctvHr5GWzbz/gPXy08uFHHwDooVWgWerltV97/v1XVYDZ5YfaaOPdF+GAC3bXoINPQXiehNtRmJyFH2K4G3+YHcjhTh6mB+J6IuJHoosm8oWieyquGECLBr6oYIwRzthjjTfiCKSOPfGIoI9v5djWkTTSl2FzGyLJk5JVJdgklD0KuSSRRfbnJIdYQqVlVmPexeWXUp5YZJVW6lTmkyZ6meWad4L5Zpr/zUkXk2jiaaagdLZp456EkmnYdG3aOaijhVI3JW9wxumnmnXaB5ueN1aK5AIhwXZSbhrhJJypt40qW6kKtOrqq7DG/woYquMtZOutuOaq66689urrr8AGK+ywxBZr7LHIJqvsssw266ywDdl0UEJ7SXuTRNStVJO1INHErUEmZevtt92KRC5IZdkFlFCNXdUmVYdJBS9h7ko6VbpzyYsvW/vGt+h4mQoH6Z8B3zawg5f6dCZWfPbUME8Lw5Womgf3+e98jWoqXMEVxtnhxQJmLPCmIhvscVEJOwwowxMr3LLKHI94MlEpQ7yyxBWrnLPNMcs4s081O9ZzkCTbu7NjR/8WtE4RJ/Uw0kUv97LNSbe2dGQ3Oz011BuX3PHPV4J8odcyd220xmDLKXaJZPtsttRVP6302lGePfLb5W3NtN5W0/89ZNtE460f33L37ZngE8ZNeNZ6LV61a36zabfJiIeo+OOcRZ4n4B8qPnTnabOo+aCfu+g556aHrvbhjE7+detwo5321U03fnnUed+uOu2Mo1T4773PJvvPoI7+pKqhldpvj7Qmh/xkyp+6/JLPVm/99dhnr/323HfvffXRkkstQ+cOFC5x2pZPgEnbfns+qRK1JNP88497bvPqBiXA9FbV7vu97SJLAIN3ka2ESUwAjBfkWAcw1HUJd4M7VKcKt0AIJm54IbOXBFHkKSTxrnQPrByMNsifDurogw5c0uk0eEATrgiFrisb7PJGQgZRMHMMxFgM3TbDCLbwho2BYez/7tbDCdVQQ0CMjvEIlsI7rVBqR6RSEoGzREw18VEWDFEUKTXF8FTRZSBUYRZH+EO+VVCEP9JdEbVYRswF8Yswu+KTnkjDNqpuR3DkmRz/REcfhsmFisphBodIuTWS8Y9dtIsQc4fBsbEQkWbE4RjT2Ei2PRJRblSiIB1JyNc18JITjOQbN2nJTsrwk1C04+7yKLQ9UmySW9qiagCJMFbuLYxOhGWgZNkbUdIrVMJ5nmKidxv8RUiYeWGVrJYZq73wb1Dfi6Y0p0nNalrzmtjEFf22CZMJrAR92AKnucrHPvW9b1X2I1c5yamAA07AfwUkoG7mNRh9DdBE9FSXAOPV/84whaRNuMQiGrekxsP0s0j/tFdA56hLlhWUMAe9UUKltlA+NhRnlWRLRFE00bxV9JUDDdRDB7NR/nR0cB91WR8vSESDHvCkE0qpzi6qtZHapaTtgWmIZEo1mtouo2jBaXd0CiOecs2QlGwpRF8KT90YdW8rtRxQzSLU5hD1R0/FWlRhZNO5VJU3V91SVoE3Vr59VTVhDVRZt5rUQrrUn02tjjzl2tW6mZKHb0VoXEWzVp/+r65BZepcCTRYwgJWcncNXGPOepm0sqyvIXXoVKvCWMs4VmKQRSpBJwuVyvrlslrLLCoZqVSSChagrlSpX4VX2pueVqGpnWlkMdpar/8y87atOif0wgk/hjzzScb8EDLfQszxBNdF2UyucpfL3OY6d3vh+9b4osst3e52nPdLp/t4i86NyI+b29RudX9LlHXt757vIq9R9kmvvYqGK5DMZ77OqFmRcjZSqYRkJqlISruS1q06zC8m77jIwR12c6DkYCL91d+/7VCxo/XjgFfZYMT+15MBrqN+CWxLrIk2wxIO5X69WGEEJxZ0s9UaLy1DS4uVmHSx7WmKG7fivyyYLQVmKYAHqeEJhy7HUq2tf0OsYF/yd7XxPDCME1zCGwOow+417H2ZyGQbGpnESNaNkvHbYxFz+MVcNvCUrVhlJF5ZkVAuLPDYGktV/jj/zR/mMZGbfGYGZ5muYwajodw8OzjH+KgRNiKfwQZkruY5jmWWYp1x7OcHo7i+LKuxc5xcleKB+U/DzUpxk3PcHmUaK8rELTOdKZznmvrUqE61qrFJXWtN15zxAy83xWutdWZ3I+LCrjppLa1OV1p/6v2JAeMbbJ7YM15RZm89la0u+hp30HgFsY4x/DPe1VABDsi2th2w0RbLFtK03fHMrC0WBJj73OaWgHfWfOf6CNlSaSbhBtBNbw2kyHHtlvK7rUTurziA3vQGwUG9LWNw1/TQ4Om3VyQAcHpjwEhbpvKJU0dh2EhQAw0HeD8JDmhpB1ncJ1O4AjIOcAt0heNQ/803uyt+Kq9ggOQAdwACI07miYfwzZdW06EwTu8LUOACGp85wlvpaIrj3OJfsQC9KSAACjj85Cs3+E/37cF4+5vetkK3BDYe9UB/nNrjtvpX5n3urJ/b3lDH94z/OvS55dxlGxx52RdybpNDvO0e/nPKWV7MsLzc3GZHgMzvTvUl21yMfH+2dywgAQnYqvEmWLfapc72wr9Q7H6xlY0n73VDWz6QSLeM5ifNeY97HuQeU3hfMiCADGye5nou+s37/HaVbdEDGfDA6/Ge7NLPzNIWF/VtSV3MYtvs00rZNH58fadVO//50I++9H2F/KSUCta4FidHsL99csZa1vTjbv/yeG2TBJjI+Dqpl9Tkyy9m52vYiMIn+qNiIpX7PkKSFkvvq1b/tbMW9czTF9u2bd22f5vRf5T3f2D3IST0bw1nd0aSVQjYeW21gC5CQkqXcYOXdhLYJvYHe7YHFjyXcVsXgXoXABNoehV4SpyGGQwHcw/HgSeYgnI2bSy4fF/xdzCHAFynZnxDg5x0YTeIf+6xg+e2ATIoe2YBhKUkhNGGg11BdkbIgwjUgfbygbznJjpobj4HdPRWgoBhhVKDhZ+nc+7xgubGdE5Xb0l4eFXBhENmg09IhF5BAugWeOYGgWE4gx7of0mWhRuUgQiAh4Jngkr4hn2YgH9YhnAHFiP/MHcCgG5IaIhuCBVw6GCVyFAtBxZkh4cRtYeHaImJSIGbxYghuHiOtxCNJwElIHlimDdkCIBLEkWjR3qvODixaIEBmHkLwWIGCBmXaGFiZoo2E0Ws53qbd4sTkotDyICWgXu6l4wnWH0poRGikmvMNyjZCFzCN2rZ11vb+CfTN47kWI7myGrq82re943d1X23xhLgF37sOH
66tl31OF7tAmzoZS/shxZSAV/xl14D1I9mMX+rw4xQGJChqIlH54daFnoKmYkWlXgqWIp9B0lxFna1p0cL+Sf554onSGkP0mgSaYYYGZKL9mQbSXQl2Ygn2ZEg1ZCK+JCbGJFOCGGE/0aSN+mML9mS30Z7CEmHNjmMslh1K3lLKPmR95aRIaeTRKl4QymHOFltTimVPBmVX9eMqVeVWZmQocSUW3mUeQeTLomVp6eL/MaVZ9mCPbmTRgeUDolnNfmVKDliaCaWv3iRZrmCc9iUeOmDiqOUlOiWs5eTf1lWgtmGhIl4MkmK9jWXRQaWVgJ8wYSNBrkX3bhM4ndd4ahzl3mOoBmaojmaykKNKrGZyYSaqTmP12WaGaGaxAWbmiaboGYiqAVbuElRt6mbA5KbKLWbHgWcv+mbvRlTxGmcx7lTwomcxTmcwZmcRbWcygmdzflY1ClW0hmd16lW1emc3vmdzPmcvP85nt2JVdtpneeJWdlpnuWpneL5nuA5neQZn+3ZOOuJnekZWvfJnfWJn/AZnvTJnvMJoP1pn/lpoAf6P7a5nwVKWAyqngm6GAsaod3pXr03oQMqn//ZoPH0oPpJoU7loQjKoSMaoP65oe6JogJKohIKonKFoSp6oibKoRbqgzA6o+iZoSzqoC7KVyKqoD/aojsqpDHKnzq6ojgKoUMaokcqowSqoUn6oUv6oj3Ko1V6oUF6Ea6ppbRpfV3qpaw5TF9ajWN6mmG6mltqERpBmmzapm7Kpq0mLdMVj/VzprFpp7OJp6BWpq95j7VGfiTRmT+hP56lfzaapUy6fvN3bPT/skCFCpJXeqhk+ZOG+ahLiahUGqkgmHALYamDGaUlCqqVV5Qn1KltKapEWqSSRYzQ0Rmeqpio2qGY6m6kenkC8KpVOKtW2qSPWauBhKugyKs5qqrhhpZGCaxYGqmSmZa3eqpPmqKxSpO+WkvNupdOCqXPypdTqZHIKqnCqqSLmUsU1q03mq3XupZ9GZbkqqvJ6pMFR3vrqqzsen9+Ga/fKqXEenCs6nb2mq+haq69aqylWq10Ka+aineO6qzYurDQapVvWakK27AMi6QOW5hUSbCROa/l2pXb6jGWlpmywqdcqqfJJ7JqarIVoXz495lv2rIu+7LOF6c2oY7vmGtp/5qyKEsR36R97VNdgPoRgmps+ohs3uqvQMqPi+p+/OJsFWmkRpuqT6mV8HaYGjuvm9qqakmxE3uu2vpocDmTmXqv/8qxXmuY9rexEouuHRuWZ1u1k/quZhuXPmqw7tpxF9u2dBuuAtWYTTus0Rq2esuQX+uYfguwhUu2D3u3crurTyurb2u3Gom3Ynu0dbt3fFuDW+u0f0urAmurktu4iRq4E3m5Qbi5jGu6XRe3YDu3kwu1FcuYg9u34Iq6gImwkrS47Qq6cjmtLva5tLusRum7hju7ryuuaUOZqWKzyqt9lik9pQaz0Bu90mtNMksSNLtrJAumN6uzOQsAO9tb7v+IveHrPos6tO1VtL8rkERLkJXGtJibtloLvxbZuaCHu+g7vPrKu/BRaPHbv1w7v1LLrMKbuYertmWruKt7uvg7tgacuJFrv2jrvwEbwMELwW5buVr1ZQMsv5pbvHsbu+8rwQUswsVKwQO7wSSMr1GbrlOLwv/bwYhrsQ+cwLlLu1frdi4Mwxy8qvrLqVSbtzZsu6OUwyP8wjxMv9RKxMRLwCXMwgJswUDswYKruoS7xDtsxV3rwH6pxCq8wKOKxD7RADgkxofRAN37ve24vL2VNGasN208vXAcx3JsPdvrvTmLxvRYx3jcmmqcxuhzAIAcyII8yIQ8EUxByIicyDX/7MWuu1MckciQHMhI8ciRDMnlyy5EG8FGjMVbwr79k0CNers0fL9MnL9gvL9Zu8ldXMpT18NY+8Ot67ixLK2n7MNczMAp3Mq1/Mq3TLm6y7km7LlQPMuAu8Jr28LD/MsKHMOwS8Wyu8pXDM1ZLMNbnMzp+7iWC8Kly8iyrMypi8BVLM2qjMsA7MQVPMqarMPT3Mzg/MzkrM7j/MXBXL/ofMGiG5Pa3ITXTMzAbM4nbM3cHLrGfMAzHM7vXMTwPMH+LMz1HMXMbLz5HIes7MtBvK8BQMaahNGD0cZ6fMYenb1k6sZy88ZzXNImfdLA0tEg3acq3dJ9nMcvDT2VHMmG/zwVMw3JizzRjVxUlHzTgzzJPo3Il3xemWzPtOvJT8Go9eS+26zT3VzRrozDAO3UAi3FoxvRmOjNpNzA1My2Ux3NB93Ex/zEDc3Py8zV7FzQ7kzRAV3MVo3PzhzC8bzTuSzPC03PBs3WVO3WD/3Bcd3UYK3Xga2A85zEX13XdF3OY33OeZ3YCY3QR1zYvXvYc/3Ub61aFCnXj83Jm23Kko3KsKzV6RzZd23YZS3adanBlN3ZYe3ZpT3Zp73P94zZpKvPbc26Wn3DhtPLjg3ZYk3Qc6HRJCbcwf3RLs284sTGI43SzN3czn3cfgy+MS2mK83S051MQV3IhJXdhJzTg/9t2Y7M3YIM1OINyEOdtFuN2OANI0hdFErdbKLc2Out3lXd11Pczprt2+LM2na92P8c27d91usM0X9t23uN21C9y1IN4Acu4IoN3F7N4N9d32hN4PgN2PTN1xO+uwq+26ut362ty59tyx/O2SDe3xCOzBKe4Qh+2ZR64Qa+4S3e4N+s1vlt4ji+36Tt3wwt3xTO4g6u0DyO12vd2zke4iiuxRHu4xoO5HnZ4Uwd406e3g+u5CrO5DMu40G+4ylO1li+5Ucu2APu1zAu0Vru3WN+33ZB3AzG5mjB0dd9p9DdXcot0s5953gOx3POx8gt3X0e3YCePPhU3oCM5pX942L/1dPcTd7lfX77aNaGHijtXV5K64/BaGKofbAWfemGl+mQTq9swelhFuCRzuVWvoSjWOTzfehNXuVdjRaiLnGentt4F+s1N+sJPuI7Yeux9+mj/dunjohXWOI6HuaE/do9weuI5utWW+upfuPFHu1IfuxDThTKzpHMrulRjYLPjuGsnuVTrtvXzpK4TuqgDuvdLuXfDubSnuSvjurDvuLr/uS6rhPjjpQOfebnDu9jSOzTvupC3uXC3u/yzt9G7trV7hP3PpbZ/um6rdJubhZw/ucwTfHXVed1nucav/FuuufU7fEfH+d5CvLYTeg1TeiFTuUGD/CPpejZzejifd6P/17uNA7Ky2bz8D3E/i7m877vjK3qiL7yQW/q7/7z0P7vQ2/si7jtUW7mU/7rCC/w//3lpa70tFzvHl7wJ37wIo7sJK71Vt/qAR/sUw/0Yr/1LA/sRV/2R8/zQn/2ap/W1Qz27c717i73S2724N7zwMv23v72ew/4+sb08a337I70cB/1ZN/jhl/1db/0UF74bW/3lJ/2ir/2jD/5lo/4gU/0eH/lja/yaH/1Xs/LO1/5SX/5n+/loQ/1nD/4HR7xZtEZsg8VE+/nuB/oFw8ZbZzxHP/7wF+OJD/yIr+n1T2yw0/86OP6qJ/4r+/4r8/8m+/2o+/81B/2xVmj0j/9zf/f+
djfm9pv1Oa+/dUZ/vn+9LpK/t7/+Kn//KJfoObf8DSv5ep/+NfP/tbf/fZfnvE//+hfpQABQOBAggUNChQg4ODCgwkZPiToECJEiRMZVrTYUGHGhRg5fqS4EeRAjyNLgjz5MSXHlRlbjjQpEubLiTRDwowoc6bOmDh9FrR5kSfKoSqLsjzqMqnFoD8fNtXoFGrUn1MNWgW61GlNrU+7epX6VWhYslt9YiUptqNaqmfZXn2b1azbqnFzlqVbF+9conr95gWME21avjv/CrZL+LDhxYWZJgYwGCHkyJQlV97r+HHjvpyNZu75s4DPAglHwywt4DRq06Rb40y9emT/bNeqayfEnVv3bt69ff8GHlz4cOLFjR9Hnlz5cubNnT+HTlw2SNqsbcNOaED7du7dvWvP/l3894QJak//WB016M91DTCAH1/+fPoMwo/HD14AgQIH/P8HMEABL7uMPaTCYiAABRdksEEHA7gvv/ESQmGBBy9scAECKTNQKQQxBHHBCCUkTwAIQgxxQ83aQ+xDBzmIIEYZI6jAwRFJ7C6hE1+cUcYHbLSMQ888rCvBBitAIEkllWygwRtx3E5HB5FcckkgO1wRriGVMpLBKqvkwEkBoBRPSgYb+HLJEMTEMsu7AtupSwVDSHPJDhh8kkwzF+SgTibxDNLNzeCMSc4O//xUUgRAySxxRwUPRRSBCABtU1DMCCVKzggiTbJGBfOEcs8AROAUgR8/DdRSsDA1qssHSk1SxDEZzdHEBankFARZKxVUsg277LPKCyjo7QMIZ6U1Sls/6I2CC9K881he3fQ1yC7RrJJY345Nljspf6PgyzBRnTbLal1UENckhy2W2271O5FZ3pz9sklyt1T13CIb5HRNWd+Fl99IPb2XVVUnw5crOQNQN00xATZAVIYjZTPhXiH71cFN6yQYVYglDgAEP0/d1WJqMbbWQUi/1PVhgEF+Nc1JKzb4YH3dWljBB3bmeWd7F33ZVgd77vnnkmvOF2V0UXwQVBxBZvroFjUPXstkoXKO2uOgHc2aUqtXvBkxrLN2mkSoyU6VtPOwu866sZluoGwJEzqh6wbjbnu211ALCAAh+QQACgAAACzNAJoAXgBTAYX+/v5HPIoAAAAAgIDQzdzp6Ok7Ozvb2ef+AADI2dsBenpKQIk8OkhFNn4saGh6K2Sysb/CEy3X5eUsAACyJSXMGBiOJVRPV1eqAABmMnNxRkYAFha0GDePNzcAVVUwKFzbDBeoHEHRDiJ/Pz+fLy+qqba+vMUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wAHCBygwIHBgw4UDBSoQIAAAgEiSpxIsWIAhxAtaqzoEIDHjyBDivQ4UAGCkyhPVlhIEOPGlxJdwnzZcaTNkQM7pNx5YWHDhzNpAg2qsebNoyQHONi5E4RChjKJchwqlaJRpDcFVmC6U0PJqFVjUg0b8SpWnBe4Mn36MyPZsmPJmj0b0qTanRSgxg0LVq4Aujc13GXqoOXeqn35/gU8Mu1ODBMwrDXs9m1ixIsZhxxAYecEARO66q3sl7RizSKV7nQoIGUFtpelxiY6V7NAnShZp+w5+q1Y05hRbyaYUvfJvF8Py1ZOO7NwAAMFnzSOoHBy4MuxN3/+sSSFChVYg/8fwbKtb7jag9ZmzFIg6/a9z89W71w4/PfwzctnTp97UpYbCLABfJSddxF/M60HGHwebOABgfr5Nl+C9aGmwIUYZqhhhg4lsMB+Hu5XAHcFsGbiiSimqOKKLLbo4oswxijjjDTWWCFWJQpQwAEE9Ojjj0ACmYBDOwZppJFD6kiicA4d8OFbCzT5JFlR3ujfWRPClKVQVzKJoJZfctmlZltuVGZRVo5505kWsTmVmoy5aVWYZqYJp0hyTpSnWHfStSd6BirYJ0h/HpgehYNiVeiidiYKAKOHgunoUZAG2miile436Zp0ohmpmJvi2Wmbo74Zqqif1pkqmqeiaumqpLb/GlKmEl46KK2W2donrn7JSmipc8Jqqq85hlirsbmO+FyONjbr7LPQRitttKjlWOSR2PaYZAESJODtt+CGKy6R3YprrrleOulblRIQ6K5PDrX77rsKeCnsnAnMS2+H+rpbL2qM5ttvfvy2hxBCT5Vkr6UCD1xewQstxRVyCgMMrJ4CNOxwchprdRdvDC0M4sYPZ8ySY2q9VjGZF4vV8cY/aWzXYF6FbPG9GL/scMwL4TYYAmyJfCzJ8JrM0M8odWAzyzi7TDTHA5GANEqF/cs0w0/r1bB0KEEm2U4qWx1ny3DpPDDPDG2F0meh8bT02E2XnbVhGks8nUN4rQw31lmj/z1QZ3e3lpJ1BAmdq9n9+n104CkprTdgAc+tuEDSUQe0T4b7hbi+kxNEAQWsfU4ByG9DTvaBm8/b+UD45Ze5Yqnva/R9Dvn7Omax+wtxewEOCOHty+UO4e4sNfjg75oVO6XmGzbvfMzPR4/htNRXb/312LdYLZEkcr+s98Ipy/KVgmKp653lK3q+Tenfuv5I7VP6/pjxc0r+/F3Wzz7+v/r6kf7w49//BMgdAIrqfv7ziAFnRcBHNdBLCPTPAiNYwAZOUIIWzGACHUjB51ywgh2EYAI/yEEMbpCEKHygxUK4whFq0IQuZOH4YghDENLQhjj0VQpluClrdU9J3wNi+P+yR8QiGvGIL9qeEJUoPiYuqYU5FKH/dlhDHb4wiq2iIhZPpUUPqnCGWxzbCa/oxTHy0HRmrKIUrXhGP31RjGqEIhvjCMY5hhGNNywjHUPVxTXKqo9y/CMZ/ZjFQQaykG00Xxrv6MZF6pGRifJhEJuYPPApEYmYzKQmqefESf6QkowBpSL3iMcpGrKOgkyk+hxJSFQikpSNzGMr4WhKVSKFhLMs5SPtuMtcTgqQruTiKWnJS1+OspawPGYxDxlMPg5Tl6lM5iplyUxiRhOS00QmNm/5RmhW05vCtKX8EijJIS6xkucM5SbXyc52wqiT5hQlXcpZLWNms5ev3OY4qdn/THA6U5z246c1v/nLZ8ZSm/jsZ0EBuj9WElSZ10zoQMMpTW46VKEQzadE/bkpYE70nxXdJ0LtaVGBcvSgy8ToPSNKUpGm9KMnjaQl0SnPs9ATne7MqU7bCc+e0vSJKi2pPtFn0IxSdKgNNSlKW0pUhgbwojBdqkaZmtSRPnSlU72qUF8aU6wedaNS/SpVn6pUo0bVUR7tqvucesCyevWsmCrqWzsq162yVKsuvWtQ85rVvQaUWDNVZzoBc1PB7vSwiDWiTwVbUxwFlrBjbStS1ZTWsIJ0spLlqmXVuqu68lWseP2rZs3K2aaGVLR6hatd+6raz14WrKRd6GmrOtq5/25WtphlIFRLS9vU8pasVvVrb1n728z69raxRatnUUtc5NpWpoOd52OlG12bJva62K3eYgk7XetWF0eR1e1sKbvc4YJWuMCt7WrRS7/ypve4yX1uXNkq3uC2lrnnva95Xxve
/rl1vfolL339a9/i1le9rjXwgAe44NwSGLbzHa9xmxtfANO1wSUssHMtjFsIcxi6jUVKYbn7XRFn98QottF2qRvio4x4nv1lsITz594J51fBMtZwhRPc4RgrsJsb5vGOTevgHCMYvwFuL4ZxGdr94rjIP8awjzPs4c4uGchDdnKQiVxlJF94xg+Gr3w/HGEoU1nMZE6zlcFsZDQLef/Md3qxd1t8Ezk7NsV4zjOLVjznTwI1yQee8pQrq9wr73bLWi40m6P83zermcuDxjKcvYxoAS/6zBSedKLL3OVNr/XSTGZvoJtsaTOHGtBhJrWSQS3pR3va0Ws2dathTeksl7rT7820q3MdKjubuMQu7u6d9UzsYvP5znS2ia9dHGkpq3rUojb0pZs97WenOtqsdja2GW1manfb2m0Gt6y1jepwbxvXNi73uKt9bkJz2ttHfnWt3y1uTN+40un+8rp1rGle87fep2axnz0ZxGIbHM9KNIDCF87whjtc4Q55uMQl3qE/m84ADMi4xjfO8Y4zIOITD/nCMbKjA5j85Cj/T3nKvcQAS4n85Q4xwfKqsgCWv+QDAvhAnV4ucodAwEA2rwgHIhAB1hDdAlPhech9bpGhE/3pEXhARYI+EQvkBm8nacCclD5xplPE6mqZusVaTpHiYB0BHNg61x/udYk0YDAhoAjVIxICswvuJBnA2NrZLoCfT4QDP9O6ROaegdWcHQEi0PveG972ABT+ZxGYyNwjYPi7nwTpcFk84/s+ERFMTeoRofoDmGK5k4hF8wxvO9iRBoLBj10igK88U/J+INSPnPM4T1FkuEJ7qr/dM6DZSeQzb3sDMJ1FbUtJ2kP/eoms/iRe2wnta198puceRbvfieADMPcAIC3up68+PecngjTMM59lZHf+z9Ru+8YH4PlckXvzJ0J5tZif+O0f/0RAcBfQux79FfF4TNF67Id67hcAo8cVwzcRAQEAIfkEAAoAAAAsyQCXAGIAVgGF/v7+RzyKAAAAAICA0M3c6ejpOzs729nn/gAAyNnbAXp6SkCJPDpIRTZ+eitksrG/wRMtLGhoSFpa1+XlrCgoLAAAyRoajCVWqgAAZjJzABYWckVFijk5tRc2AFVVMChcqBxB2gsa0Q4iX09Pny8vqqm2vrzFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACP8ACwAYSLCgwYMFBAgocICAw4cQI0ZMoJChxIsXKS48yLGjwI4gDSo8sCCAyZMoU6ZcMLKkypcqWQoISbOmTYUEYOpMiXOnzwAKbQodWrDnT51Gj74MSrRpzaRKeQrIGVUlU6dYOUKtanIr16tZww70WpVsVLBis5pVuvYo2rRO2/6U6/MtXKJ0d+ZFOvOu2qlcpVIN3LWv37iACRceTNju4aeJFe+F6fhxyMlLIzc2bFkoZquaA1furDX0V9NlOZOm+Vmw4sKrPaM+O5ut6tgdW6PUfXI07rG13Qafe/u3yOF1kestbpwg78WvgTIf+hFrQgEJXG7O/rpi2OsKw4v/H0++vPnz6NOrX8++vfv38OOPLzAhgf37+PPrr9gQo3+HGln0n38BfjfBAAgmqOCCDCrQkmIykQShbzUlwOCFGDqoHF+MiTadUBY2GMGIJEagwIIadniaiqmlFaKCCiAg44wyWoDic9KxSJuLDFJA448SwIjjkB/a9CKCEfyo5IkIphgdkTwmqIAFSv7IgZRE6mhblAhKUOWSTWb5ZJEVSvmlkhSEuSFla2bG5QZnKhnBAE521yZoXHr5IwYVYABmnZLdKRWXA/hIYwUCVPDjBmpqKZyjxBGaJI3h0WgBk4BuBmlyhA7AAaUK0Rhko2NuulynMc5YqYxpYinobq/2/0YmTUciCKeMqyIwp6umcjhmp3RSYIEF4Q3LqJCxQmcnsAqGl6GYy4pV64LOXpiph72yyWyCGgigwbPJ5virtBgi6IEGHoCbbWbr4imtAvDGK++89GrIXaD3bladU+DJ5++/AAcs8MAEz/cdfwMSWFF9+jXcsHdZ7YvVg4TJdGC5GNNJIW5ETpsxss2B1PHH5WoYcm7hKuQxyRrPGtvILDe48cspYxezzC6vBvPNrp5cWrtSrUyyyT4fBzSsQn9MdNHO1Zx0xkszDcDOPLcsddNHy/o0xlEzTTXPXRf99c1h+zx2zGWffDbLaYe89tAz6+x01T1fPfXcdFtt99tKx/9NGt9Q+82RxERdl6+HCdSreL0QW1fw45BHLvnk6xFOXeP8Yt7UdfQ57Dl+aQl+kOjHXUy3AqHnjHLqW5ec+utiqXxhBBLUbrsETEoJe1ikF2XzgpOiieLuf7GOIpVfjtok8ROrXlqtI8R5qe6xOz+69ccdmWqcx9LJPGLGJ/hpnDJi+n1TvTf9YvDkX+l99efj9Xuh5NM4J+rw58/7/HrOyKefP5oe/vYXv6HILkHIkxGiFAWk5emveNU7EvtyxSrqEfCBzasVCVQVKvtZEIIX3F+ttkdB9zkwhCD8y7RuRcHcvQ+FzQsfjChAgfDQkALKO2EKYxjBclXLWgWUjQz/GfTDBgXxJtjzndC69a0MHREyQ1zQudKVIcsJhXMH20jEVLa4LsaLcmAMoxjH6B4r2gSLW9Si49TIryeyJolNs9vd3HgZON4lfcCho9z0uLq92XGOGBTbH/EIyKsR8pB/5GNpFGkZRDLyMY4MpNkGSUk5RhKGgnyk7yxZSUmqrZOYnKQm4+jHUeaxlJ40YCJTKURWGueSO8ykK6EoFjPWBI1rtCVNcJk5Mvryl8CcnC5DwsvNaa5wx6SOKQsZS1HO8o2cXCYhpQlKqcGSh4asJjatqU3woTKU8ovmM+soTnCqspzNDOc303nOdW7znbI0ZyuzSU10whN9q5QnEpdZ/89x7tGffaQnQBfpTm/q85ZZHCZIiolMNhozmBCNqETfo1CPJPOKFz1jRhE60Ovxs6PH+ehBoQnSw1zToNzsp0BHSs6C4lOk7JxnTF/ZzZe6VJ0s/WdOA5rSkpJypTOlJVDvidOe7pSgQ0UpUd1W06J6rantTKpNj9pIqMrUOgnNahorOriJevWrYN3oLsVKTLIu1KweUWlQOWbVfd40qmulmVqfOtd4xlVk+bwrT5fanJNO1ah6RSpg+epWqTpVqXYlrFAHi9i/0tWnpzQsXBXL1ro6k6ohfetVG/tJy3YWssx8LGY3WUut5tK0vQyralc7Rq4iBK1ddWhDXWsQ2v96FLTTxG1bf+PXw142sLeV7GYdm1jOTra4xE3uZ0f7U8Yq97i/pWxLhVtY4EJytySl7mKlq1Prkla72fWuX3oL3eWKN7LO9a16mepZ9urWnsYdbuFQ+1DbEoShl2OtfvcLOfsOBL8YlW1+/QsAAqOXu3tF7nrl617mHljB5Y1wX7E73fRKuLqiPW9oIczgDtO0vRMG8Yffq1kMx3fEDt5wdE+8XQ6b+LkhJjF4K4zgzpDXw7ylMF7hC+M
Xm7fGwY0YfWc75Pzy98hIlg+BAaxRAQd4ySJu8Yp7LOUfszi8GQZyZme8Y9BGGcsurvKFc/xlucrYwjgG85QX7OMGazj/tym+MYrfnFctf1fDZe4ylxNM5cqeOctXpnGY1cxmMv+5jVst8pOzmORGOzo9S4bta53cZCgfus9mjnOdA61nPF+60H7WNI9BLeg1j5nQbrZzcwGN6U6zmtSuHnSpYf03HfPZyq2+dao5resYi7rEYk5zpuk86lPPete5FuyrjR1rUwu72fNNtLRPm8ZHW/vakq5ttgvC5Ftu+7557vWcVf1gZ7f52bUOt41trWxZQ5vZ6f40smndbnMH+9zj5nW9cU3vIC8b3fueN7wD7mtiAxvVAK+quhUu74KTW8X8Hri/3S3uexv6199RdKU17m1se9zRkRaAAUZO8pKb/OQj/1cIyle+cpVlvHoGYIDMZ07zmtucASpnuc5JjhOGHODnQA+60IWeOgaMaedIV4gJtMOVBRT96EjXuUIeEJ2nq6QDEMi61iFwAdBEXeoCoPrVt651B6jE6ii5wJkaIJWvs3zqKlH7l85ePaOnJE4daLvbUQ53lDQgTiBICdpNAgLyZQBWe+d72FPSAfKx/SSDz0D9RID4xJu87yaRPPkggJLBQ6B+COh6YSx/+cWfRASgN7tJ0O4A0MtIVqQvOeblXr8QQL7uAfjAefqkpMNLJ/Y8X7zuy8P73q8e9+lh4Iw4P3rgGwDu6FH+jPJ+/P0ZffjEB+CMfP9758Md++MpPkyNHh+AwQegfoGHvfdNf5L6ib76f7H7SWg/98oDH/MmoX+VBI/7lHz+S+/XfPfHficRAmeierdnfSqheUpie3o3gGKHEq1XJcyHEgEBACH5BAAKAAAALMkAsABiAEIBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5/4AAMjZ2wF6ekpAiUU2fnkrZLKxvzFmZsETLa0nJ0pZWdfl5SwAAI8kU404OKoAAGlJSbYXNs0YGAAWFmUyc9sLGQBVVTAoXKMeRJ8vL6qptr68xdMOIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AA0AGEiwoMGDBgQIMHCAgMOHECNGTKCQocSLFykuPMixo8COIA0qPLAggMmTKFOmXDCypMqXKlkKCEmzpk2FBGDqTIlzp88ACm0KHVqw50+dRo++DEq0ac2kSnkKyBlVJVOnWDlCrWpyK9erWcMO9FqVbFSwYrOaVbr2KNq0Ttv+lOvzLVyidHfmRTrzrtqpXKVSDdy1r9+4gAkXHkzY7uGniRXvhen4ccjJSyM3NmxZKGarmgNX7qw19FfTZTmTpvlZsOLCqz2jPjubrerYHVuj1H1yNO6xtd0Gn3v7t8jhdZHrLW6cIO/Fr4EyH/oRa0IBCVxuzv66YtjrCsOL/x9Pvrz58+jTq1/Pvr379/DjjzcwIYH9+/jz66/YEKN/hxrRp9+ABHqXFX0DJKjgggw2qEBLisk0QYMUVvjgdEIlUOGGC17ImGjYcSjihWlp2OADKKaIogIMehidQiaKaKFvNcWooAII5KhjjhqweONz0tkoY4s00iTkABHsqCQGHQIJ45AzYmiTkA8oaaWPA7jYXYhQOlhkSDYqoIGVSlrwo3J8HdkliWLZiAGZVj6QoJaScdllk1LWeCOcVkYwp5N23vlnnkYqaAGfcWYJqJpQshlWjBJYeQEFF1ypKJqUBSqoo1nZmOSOFAhAwZKDfngao0NyipWNVe4Y3o49lv/6oqZ3quqUkIfq+KqOEpxpamqoymhrU0LiqKtCOvrp66zBjvglSEdGmuOuCMi57JbNcjgsUUcqEEIEEYQHLpN4/kpbthtuO1Sw4UVprm3oRlkih+1SSOd2guI574YbCLCBu8zme+a+FXqwgQcAYyvwoCUq4PDDEEcs8YXc1ZnAxBhPbKB18nXs8ccghyzyyOZV59R1JjeF8ncbn5zWs7kRepnMq8Fc2ss0k2bzQTsf1xxIPReV880/Ex1W0M4NbRnSwOFctNF/Of20z2IxDYDVUh+tNNVTJ5111F17XfXWQoctttZfd2312mT7xXbaU789ttlNz2232XKjTXfeYOP/3XbdevsNd1NY39334daxvNGBLavcOFHgkSz55JRXbjl6KUP+OHWbC4VyfQSGft/geP19tQATLqwA6UOxHa+9rMuGs5oS1G577VjOGftNpj/JYKt9trg7ZLO3OCacvd44PGu9a/omn7HqbjhWrneIaI7kZrn8zMUben2OPq4+PWLdDwD89WZqPz7hzcf4qY6TVpqo+IEjjhikSoY6qpKx0m8/++UbwPGmhSwlWct/1GteAM9HrRwpS331S2AAB5ArBDQQAVhCIPnWV7piHUsAO0qf9CK4Qa0d6XkNzJ0GAcjB1tFKARYIl0LAFYHkKa+FshsbquoFOxzyboIL4qGD/7YHtPbxy1/pImLMgKgggyHMQpnj3OI4NsWTwShjWHzY5bbIxS560T1R9FznbLIyxlVRZUqEmgT3pkAf/oZvaxScG5lHt9OlkWemuyPX/lc0OJYwbH5koRxJKEhAtpGQfTwkH38WyNLVsZEufKQi42hIPZaNjZY8WyXnyD1McrKIkswk4DaJyEh6spQ5/I7iwkjGMdakjFT8oixnScvKsfKVrqQJLK14RsiJ0o6f1Nkk/6i2YRaymL8sHCp/GMzOQDKVpFwkNJHZTDyGspp7pOYyiXdKaTJzkN7kJjgpSc64GdOR3SznMc2ZzDxi85LbxM0zvxlNdaJznMTMJxrNeP9LXeYyJLt0XC0HStCCvqefAP0nSAKquV5Sp52/hOg73XZOU+JznRhNpEQves9wyrOi02TnREepTY92kqMWNWnNQErPktozpS7VZ0djmlGYijSeJ62nTG36tHmKU6c1DWlPWfpTms50p83xKR3FgtCFKtQjT+UIQ6Vo0Kpa9apNhapDxbjVVnb1lRtVqTDDalSeHnWoZL2pWNWo1pcKVaMjBSZK32pWRhJ1qXNtKVKNo9SclpWugOXrXf3a1r0WtbBB1Sta46rMtVozrmlN7BsHC8p0GhavQD2rZh/Kz1V61oxXDa1oZ5lVqUYVIac1yFQ9F9nNJpWyS7SsZDH7V8X/ulawrbVrbl+7W9wy1p04rWxwx/rbaw6XrYs97mPzetjZxqavws1sXQM72d5Wt7iyvW1z4arcbCJWu7T97nRty1kqlha1X/VnehM62va613LnVW1qC7Jar8aXvtYNb3Id6939upW8vMUuc/UL3ufCFrnc5S88pUtdAutWwAwGsISv290F13a746WwgjV5YQc32MD5BTGEO0xY5640xCcesXg/HF0St/iyAP2seWXMy/fa+MYfuy9B6ovL9TrVxx5BcWwHXOICp7jCHF7xhD3sWySTVMkYZjFxnSzXCEd5ySKmcmP/e+Uuf1TIzjzwcq3MZC9necNP9i+MX6xmEw+Z/8xFzv
CXVdxmI7/ZxXde85TRXOXEdfbPMwYtjgdN6PXoeCA8Vu+hAZDoGNPZzmEG89LE3N8Ec7nMc9YycPkM3T1fOs4B1rRxOb3pT7MZ0pOW9GM6jeAHizq7cgZ1k0k9alPnuc6xPrWraQ1rKbc61Lwmsq59jRAaC3TRjf5xoZfN7PkorgDQjra0p01taCuk2tjGNoxUyZADePvb4A53uK+d7XJHm9zmLjdOui3udrdbO1yRSbrNje55V1shI4B3VRbwLuHYO9v1/re0FeKA6PwqAxBIuMIhUAHQCPzeAng4tQn+EoQvPOENUImpKsAnBkhF4tMOuMQpnhKOw0njKv9BVAY+DvJzR7zl1hZAwVHCAESBICUfAsH1Mt6bl8Nc5A8n+UkycD2Pn4QxNb9eB3bj85YDXeBCDwAHvgcBlDCG6N9reGFgHnOuFyDqJPgeAngegME0QOw56rnXn/5voZv8e0s3CVU+YB5KWYnnbLd33udNcbqXx+5W4oDcC1N3JVV961zfe7opjp797Wjlgw+A38kDeB0JHvE/bzrI+153+e3I6GVPSQeuB3nMO13zI5d5SrJu9ZScHVEsz/zaVY+St5+89SmBAJ+0rvbEoz7otEfJ6OFE9sijZOpkijvTZz/7maPk9VY6PO5TwoEGWP/6Dbj88n3f/JdgH/ugP/oorJjve+cTpt/EIX/mzR8Y9CdH/adnP1fcrxMGKJ7ev4e6AETwGgYEBAAh+QQACgAAACzJAMkAYAAkAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2ef+AADI2dsBenpKQIlFNn55K2RGW1uysb+qKSnX5eUsAADCEywwZmaQJFOONze2FzaqAABlMnNpSkoAFhbQFhbbCxkAVVUwKFykHUOqqba+vMXTDiEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wANABhIsKDBgwYECDBwgIDDhxAjRkyg0ECEBBgzatzIseLBjyBDiiSo8MCCAChTqly5coHCCANiypxJs6YChSNz6gSpkADLn0ADKExQs6jRmwJ2KlXaM6jTlEONSp2JdKnVkU2fOo06dWrVq2APZtUKlGvXozjDqh04lixLs2dtpl0btq1blXDjUp1L96rduyjz6o35ta9fAT4BrxQ8uLDhpX8BM9br+PHOyHcnx61sOSdmt5rPcu4s8jPZ0F1Hk+aJWPFiAUQH01S9Wmxr11Bhy57Nt3ZI01pRe+3t+yPwp8Kl0i7O9jZuobp3y1zO/PjW6NIHUC9uPWhytEmZ//92jvt70e0hBYZNCPvk86EK4sufT78+UvWHFerfz7+///8ABijggAQWaOCBCCao4EIXceSggx6tFyFY7OFnFUzZaUecVRtCFh5YsWWHXmkf1lWiVSHORIEDLLbogAK8nXgYXR3qlGJMFCCg4446cgDjdDVeJiOHQ+504wAQ8KikBnsVyZSTQqp1owNKVvmjhlDqFKSWWYqUogIcVKmkBUB2iZWZJEo5kwZiVkkBYVt6huZ4ahLWZpUQwDknazTuaVCIFtzpJpZ9FhpWiDkqiYEEGFhJ6FpxnllnTEnyKIEAEiypp6FqRQrSl0rqx6OPm0Lqp22TxhTojqLu+GapnZ7/apCnH92oAI+tIpBnmZyamGpMVOqY65WPxtorikVZAAEE+i3LZJPHznjoVPqBFy2Rv9JU7Xm08mlqtjNtIMAG1n5rLohTebCBB+Uae+5VCdgnr30TXlWhWveCteC+/Pbr778AB7yfhVblS2G9BVfU4IMMX+thnxhK5/CTfR6p18RRmmqxiy4SizGXFdeUKJ4zfSxnyFSF2aYDMpksqcY0sXknqQO4nCbM0wmq47M204mzqjrrCGPP3saKaNA6kkm0cbIWFJW6/jEqJgVLowqzuP9lqqSPVc/aNEm6Qd2f1Eq+2bXTXzeX4sh37no22CjLtOqdP77dXNx23klmTHYD/9At00fKLOaVff9t25EKKLvs4hCw3PK7+qbtN3a7FS65eTRZjrdsmv+8G8FLGWwvwqHDN+/pAqeu+uqstw4g6EqJnvBC+JIu3t2QS3u71e72vjvanf8OfO7YCj+875Ebf7yvxO9uuNd9I68788pPHrzyzy+ffPXWN08x99nD7b144eMu/e/ld3++85e3z7361MdvfPr0S77+9/eT7/741e2ff//XK1jtaCchAh7MgERynQIXyMAGBgh2O5Fd6RA4OwjqL4DCq1/05De97VVPg/zjjv84iL4RehB7JuxgBlNYvA+y8GHgeyH+UIjBEtaQfTe8HQj/J8IcXjCEL3OhD/8BCMSbCbGIPjsiD4NorwFaMCcSjJ3tuOTAKlrxigJ74kiiGMEp6oSLP1yiEWmIxKKRUYxJPCMJYajENc5wfjLMWBtPSMcVDrGHZWTa+3boxjD2EWQxvKNv+FhHG+aRd2osJBsTqcIWMtKRitRhHAE5x0YuEo6CrA0hLflFJ3qygFocDxZHScpSDiiU6fEiFFW5RVYOcpInC+QhoSdLNJoRk7PUnh1zKb5KQpKTfoykHB95SWAS0ZZ6rOUfY+nLYv7SkMhEJC6jSctmvtGYeKSmLqG5TCZOs5tj/KYwKUnMaz4Th7w0XzmHOU6RoBIkYFwlBSf4TtuY8p74LGU9EeL/ylTOU4r/lGQmV7PJcwo0nfATJza9uUtt9nKd5GznKwdKmoI6E50OVadCDcrOhoIzjRu9qDkx+tFbelSi4TzpQlPKTZSCVKUcjShMRdrRlq70pTaNKTMhulOXIuSTB9ynQeJJonwa9agMFGpBiOrOfsLTqRWFJUNzSlOZUnWkWA3mTU161ZpWNZslTaY1vZrVY4ZVmjMta0/TSta2mtWnYuXpVL86UYSmL6MJ3WpnLKrWuuL1rmetZmANw1e3ghWuaO2qVemqSamyNIJAHV1AvwhVryH1spjNYmQruFnFrlWnfh3sQ0Pa18ce9K/2E61G9WqZwi72tKrNq2fnWlqC/zoWpyRFrGBJa1jaahW0pv0tY3ELW91uM7es3S1bX9vbxlJ0r7flKnKBS1zh1la6xU3ucTsJys4CVKnNyax4x6sg8A6Eqf40LwDQa9vntja6ceUtc+d7WO2OdrmfHW5U3fsY1+Y3u9TFrnWbW9232ne1sw0ufUNr3PsmuMD/HfCCFVzfAMcXv76dcHvtmtoGI1i/0OUvYeGb2OmCuMTcDap3uzjZM5H3xTB+4AALQOMa2/jGOKaxQnLMYx4PBV8HCLKQh0zkIu+4x0iu8ZGTjOSeMKTIUC6ye1zjEgEwOclLvnKOFSKCKQMmMUD5gAA+4B0ra9nHZj4zjhXygKCImf/MPwGzSi4wgQnop84VeEua1WzjLPNZxwJoM0vofOc6N0AlckZJBVilkB0x4DV/vrGf/8xmliwaAblCAKJZgqtG6+gCkI60kvcc6UqrhAGMFgCPQJCSRIOg06re0aFzI+pR1xrQgk7JBVKtpEcHQM6oh
jWPOoAXUlPa2Hw2NUoyIGweTQAlct51s3eU58AgW82TTnagVTKCacv61ylpQJUyrSNa3zrb2N62okPl6WGDGyWvZneseTRrdJ/Z3lpWtrR5XaUMyLlKl9Lajp5t7VsXAN9XVjazLYWpKoFazpfe0aIaxaMMmLvWCGeysgOwbwRMvEqPTnQHdAbqi4s641g9VndKglbtRItbUKHG+LXvrfJ1wxzaLJnAnapt8lLPPN81R8nI2zTrd6tk4VUidsxP/vOEBz0AL68SwVESEAAh+QQACgAAACzJAOUAYgANAYX+/v5HPIoAAAAAgIDQzdw7Ozvp6Onb2ef9AADI2dsBenpKQIlFNn5JWlqysb+LJlapKSl6K2TX5eUsAAC3FzUwZmZnMnKLOTmqAADDEywAFhbMGBhlTEwAVVUwKFynHELSDiHbCxp/Pz+qqba+vMUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI/wATDBhIcECFBggTNlBQcIACAQIASJxIsaJFiRAvatSYcaPHiQIbVkBAsiTJDQwJPoz48WPHlhtfwrwYsiAEkzg5FFw5MybLnhZlAgXZsAHOoykdCh0KYOlQp0BrOtxwFOcFlVB7Zp25FaZUDlWPVhjIkynFri3RfqypIOxRCGTVepTr0yzFmhfcilX60y5djn3N1hyJE8MEDEj52p3492Jji1JvmpwgYELOuIGZPq64+e5OnBAFmESJeTHGzE9RR22Yt2Rok2NLm+7MWHVPqQ5NviYJF6ttrr9h0paIe4BRkrsRJFU8O3ha52sbDrwAAULo6jp3Dm8KfW73jcULhv+WLnvx9u3hCY4nX9b8d8CmAaQfqEGABvLM3cdHj39gBw0d4NeeX+85ViBkCiSo4IIMNriSAYsZABGEdkkoAIVmWRjahhx26OGHIIYo4ogklmjiiSimqKKHGDJlYYtDvShBAjTWaOONOE4Y4X4HciaABP0FWV58BPI4n5D5EWnWeQKEd5BCCC2XpJKpGSkdYUdt0NCAVALFJG4KUBVWA9r12OWXRemlnG9dLmlmbVK1paYIbLZZZXNStaZmSlza+ZyVZKlZUm99+umdlf95eFhVYxVqKHx4DlDfh5bhRJqjjwb15mkCJdrhojiROWWmkLonFZZu9TYqqZoCSpCeYcX/tiqrZ23KHZh6XVUnraUSWBxYYS2HKa9obklddchCIOquvLYaKZICbsejq9BuKW1z1FbLbLO1ZqvtrD3BCNSLEUKUgIPoOqhjhSu26+678MYr77wfihvuuhni66K+MU6L7b/c+uivfgELDHCRBXd7sJsJK0zwww0zOXDEtkrc8GkTM3wxdxlrZmvHd0KcsMUiF0wywhefrHHKFbe8MccLe/yyyjJvTHPIFIPs5ccx47xyzj3vPLPLQYdb7oVH2zsTuezS6/TTUEct9YhKw8R0vkg3XXVLWwtHdMkB3yy0zV+jDDTYY7Oss1Y8o8322sSW/fPIctesdtHADQ2313rj/8032Xv/Cbjfgt/tdt6DH/634WbPbXLdPj8euEttN2635WFDnnaFSXeu9dFThy766KSX2LVHV++bNdanb9T6oYRT3rfihZ+N+eZ0Tw47445HnrnudSV++9vC94675LHvbrvxxPN+OfNxA99r7skH77zvx/9e/fTI0y578c+Hr733ylNPvvXLi589t2I3z/nn8LMOeun0128/1K9flHqM/I7bv9HbM5D0/NQ+xF1vfe4z3/AMmD7sJbB7C1xcAxHIQAVCT4IWVN8DxxfB2mXQgRWE4AU9KEINhpCDI/zeATdIQVoVEIMlBCEMUWjCGbJPcyxs1gtJSEMZ8nBcnpNf/P9Ud78iGvGIJ8qfRfbnv9WpTokVgaKzzse9HrbwhzrE4QlvOMApTjCHW8xiFw22wjCa0YVatKEYA+jFD15RhV88Ixajx0YyxlGNciTVDuHoRjDiEY1jdNgd5/hHVu2xfDF8IyKt6EdCGjKNjvyIFCfCxHs5kX+XbCISN8nJTk5SIpVc2v9EmUkAUlGAdTwTJPmYyEaykpF5XCQXU1mb2XXwlbM8ZRtbGUv09bGXVczlLWW5Rl3a8ZeFxGUxh+lLXiaTmHQ0piCRGUloApKWGAOfD5UZTWYGc5kptOYjA1lL03wSAKG02ijVWUpSdvKd8KzfOdPJtXXWs51WI2c2pUn/pUM2E5bP/KcwwylQcNawmo/y5ze7SdCFXpOf+xwkNwNqKIWisowUvahExTnRcWITZhhFqEM9ClGQbrSgwCTgKjmaKYvuEqAi1Sg1O0rTlq4UpfkcIibnaU9JxvOnQJ0aT/F5z6HOE50HSKpSl8rUprr0mEw5wAICQNWqWvWqV13AU6fJFAJg1aoeEIAHvkrVrZZzMV7F6gcoQIHQsPUBWDVrRM2SVqs+wDUQKQkDripXk9IVq7rJK0kowNebjvQjdaXqXfEqmpJYwKp93U5iGQAawZIEBJA1rEy7alUKVLaxJYFrWTX7UqDU1QJHSU5JqhpZW9X1A6m1bEkiMFp9//qVs1SlLE4oU6mSYLa2H5WsXQsDKseylrRQHUpiAyAowh7XtsK1agTUVFjouharGXCLaJ8b3OteFbVhiStyuarcr0bgAehN7wMeW93uxme5ZI1va98b3/pyt6TRta98x3tWu8BXv1VlQE9RN2DXFVgjGgqqghfsrqEW4MEQjrCEJ/xgiFD4whc2146ag+EOR9jCHvYwRAhggKaa+MQ8CrGIBaDiDkOEBFMFMFYXkOIWZ5jFNqYwRBwQ37CONb41zrGEQSzkDwuAx1ilQAZCk4Em0/aqQS5yhXEs5SkjebgIUC1WoyxlIld5x1fVbZZl+wEoc7jKU0azla/qWeTIFsoBe60ql4vs5S4f2argdTNoSZIBq85ZyHWm852rCoLA7pkkTw7An3McaEAPOgCLZexRQiDnM6O50Yx+dJslfRT2LtrGmAb1o/NMEt4exbmKtvSXqbzqKwdg0wgwDGJwEudUu0fNBQh1i8FsVUFt19YEwrWuVczrqkY6vH5WtZ2F/eiqhsAtiabqp3fNaju7mqrTrUqfzXxrNQ87xMWWbgTGTe4I1LrS3b50tQV9bRmjO9jeXrej2+1uYLuJ2cymt7sdHG9mj6DeYQ4IACH5BAAKAAAALJgA5wDFAAsBhf7+/kc8igAAAACAgNDN3Ds7O+no6dvZ5/0AAMjZ2wF6ekpAiUQ2fklaWrKxv4omV3orZLcXNasoKNfl5SwAADBmZow4OGcycqoAAMMTLAAWFswYGGVMTABVVTAoXNIOIaYdQtsLGn8/P6qptr68xQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj/AAEIHEiwoEGBAgQcXHgwYYIBECNKQECxYkUOEQcoSMiwI0GOHj2CDMlw
JMmGCk+qXGlyJUIBDzM2sEiTooKIG1O6/Khz50ufPIEObCkUKFGWMDMq2FCTpgWcR5EWBRBVZdWTV6eSzCoyaUQOTWtWgJhzKteuZnv6PKu1pFqXDnGGrSmBLFu3afMWvdvWIF+/XgdYmCtW41/AeoUeLri4L9W3SGMOqFATAwUMNW+W3QtZKufEjj0bDTxgokUKAijQxGi4s1XXWGFvlR2ycV/bjyVrpJlQgMUNN1uDXksb7efQcIu71S3YYm+LY+0qx3tc8fSFuLXijqvUecKKdaFe/0c5fGf27OUjZ4Q4k+Jzm0rPj0dcfTTy5JyZQ7QgQULv/qyJl55o9tV3X23z8aRfRr2tJ6CB5iUYFIQHUjfaghE16KBwFBJI3IAV0nfhhhFpIIAGJG5mHYixsRjiUBK+hCFEHWjQQYryuYigjh4ZIJQBCfnoE5AwKWDkkUgmqWROQg4Z5I9PAkVkkztNCWVvWGap5ZZcdunll2CGKeaYZJZp5plopqnmmmy26eabZiZAwJx01mmnnQcQmcCefPbp558JROmkAFS6NOUEgCb6p6BVBnnAnZBCGiihj0Zq6ZwJLBDAppx26qmnCxyQUHAklsrhipxNYOqqKo52gKafxv/66QIJvSrrrZzCiiuuBIy6aqmtfpjfr8DKR8CuuyZ0LLLMNttprwKQSmx8Mf40rIMVNKDttg1Ie+poyzrrqbLilrsrtN5O+62w1ulHWVMbrBdshOGaGwC59ub7rK/qUstjR9zhxFRYDfjLWb3m4quvvuj2azCq7a7X3lykzpscwuUqvLC9DTss3b/LdUcYAiI8aB3G4mq8cbkde2yxhxHqNtjICAT38msoO6vyys627PDNLV67G80IhAf0bDk3uzPPzPrc79E7Cj1zRZZhVhjUXSXN7NJMn8uvyzkKPTFFqKlGE3Dr0rsw113f6rS6WANcbW4ZmebedzQVnPbFawv/oHXbn749bdwWjhjRu3f7ZlF4eyP1d7J+A97z1z+HHXFGU79XUXSN49z345JvKjixhGM3d8BkVaQ5Ak+ZDO7noTdN+dOWG54RWAisLm3pKIF+K9uxczr6r7yLaDtO/P3Xn96uE+e7rMAHH8DwrNZO3Iwablg8Y5HrG33w1Ju6/YSXb5i9g+PD+Hys38cefrGnxz+jiSjiGP/6n7Yf+vv2g2z6jDW6Uf8OBjvpyWoBk1qSApfEKEM1cCV6WqAEj/RAlehJVwmDCQYzBqcOevCDIAyhCEdIwhKa0INyupSl8uQQFVpqUoWCYAVPYiUpzZAkU6qUC+8EQx3usE6Zyleo/xKyQWfRKn7+Iw9nbGWvIzLRXEUUF7T6lkTjset13uueAWM1xSxWkXtf5An+xqXFLXqqi/myXoTCqL4CmvGMCaFih17DxpeMsVP6kxwa7aVG/MwxNnfkVB4Bt8cM1vExhxxk/sr4RtHF0Yt/nE0iGcnBQK6skByc5CQteS9KvhGTKUNiJKN2Mjc20pECkCPECrRK55nylKDUmShbuUYCZpGTC4ul0mbJyl668pan3FcqIUlLPxbTca9spC63xssrOlNtwAwmKlXpy1oe03PRlOYykdVHmBkTi2n0pBm3CTlNjjJryfzkI8NpzmsCMp3jXCcfm2nNavItm8EkJ666Sf/Hc8qNk4pkGgLlmcEYWvCGIanhoAxKQ4dEcZdBDOcJJ0rRilr0ohjNqEazlMIf1omFMPGonWB4JYbiEKE9QmlHcihSIDqqpXSKKBRFJYCHIuuI7bQnUp5YLifadFc/RZY+f0fPbz7znuHEJcMImkl/Fu6oyMQnLJkaypxCFZtJleY0ianTfroTafDc4lChV1RvmhWr81RqvsbKvrJ6tavvlKoyqSpLq9bzl1nVKlsXaVejQjOv2qTrLvt61rgCNp+CZSZh34rXtGp1eonlpluD9lUEAVScYo1sOZ1quk2G1YB7JeNiKQtOx+pVs/ucrCQ527vPSi+0eFQtKeEKVrn/qnOY7GStFe/6V9MGFrfzHO1qS2nbeALXkLoFY3LF6FrwoZaowp1tYzPISQYsVQDWtRcDVMoQhTaKUCW9kiW3i1lkkXej6E2vetfL3vaO0KQJ5e5BvOtA8NrQvgsN733h26MD+Pe/AA6wgH1UKwEb+MD8JO1VW2SAAzsYwBxp8IMfHNRbDbGmQkzwcGmLIJ6Ky6dCzOVzyRpd4xD3sFM9blMra2IOo7O4mVVxVZcLI8/CGLQjbmuJ5ZaWy6qVYznmK42txeJ/Ntd9QRbtkBG55IDG9sfmgq0gZdviBdcWxXOVcV2bTGUj3/i1SY7tjp/aW+o+Vsqb0rB0eYtU3yJW/8uD5bKNsXxbalp5zX5ts5lPC2fFypnLPj5zmKc85s7aks7GtTObC7vh6VZS0H2WbKGVeGI3p1jReWY0nqOK6BhjWtNVXjRa9/zbTzNW1Aou86P1lV0gYzdf5OXvSuU7X1obhL4yLO+uyDte9/r618AOtrDRK+vu2poguD4ofr9b7IUku6HLrm+z5zvhBxNYANV+sJpDnWk6SjjbAo4wuA1c4QPStNyxwumfi+wWDxuxVuielYgjvVl2G9rerf2yc+md2knvVs+rLjVX78xtUHu50zjmN3TX7eKDWzrLpk51tyXO6YfXeeCobnTGLXvk/Q06zV0m88SvbPFEY3zkm/8+NcBDCWWWfbyT/lZupUn95ohrHOUFH3XAa35yg4vc56YLNJ9tnnKKq7ziNL90z4/OdKMbtuSeXrrTbw70fCMczAonMcMJ7vCkQ1zqVG962J/u9YvnFt//rjpghC7wszf852LnuL6RnHUdb33jL776rq/b6nLFWr/53a94We3kTZ132IhPvOIXz3gvTfvWxx7Is08a7Vw/viCTj2/llX15ZI97wI/5fLhDfu+3d/bboqcpAFAv+niD6twZJj2lTY8SdzcLxE2cN9FzPvWik7zsJnc71+He+7xDPeG75/HdVy7Llkvx5dtWPtq5x3aeCx/v0qf92juuR+jLPu1xNz7/8KN+fZxnf/hB5z4hvR9z8pV2/MgHu+/nz/vf71zp5Ve7zLVPffW3Dc0wt3z6x1xz53F1J2TT537oZ3XHh3XJR3xjF4Fyp3d094Clt4DgR3b393X5F37nh31uUX34F1wC6IEh6H9dA4DRB4H013UbaHYkmIA1BmgoyDQq+H37h4H9V4Cy0ncu92raFXmSJ4QCkXkptXnQ1mu6hiuH13hO+IRQGIUY1XlDiISaR4VFSISrp4VGOGtWeIU/khapB2E4qIAgeHpj6F/iloZp4Xqdom4ySGT8xxO2xyy4B0VpYYGzp4NmyHy7xEl5KH/114IsKH4v6CyB2IHF94Hmd4I8/xgriRiDcziDcfgYIlgukYhclbiC6feIn5KJKzaJcsiHbeSJngKKM7aJZViKFHgrqLhlqjhnDbgrrxhnsUiDptgptehnt6iKlyguuyhpJbiIjtiKshKM9SaKTOaLNcgpyNhvwyiBhshyC/OMC9eLylh4IAeInKGHGSiNjIh0h9gs1qh12EiKdtSMm1KOdneOZ8iA8Ic
s7IiA2biK6ZiLztiNghiOJniBqkaN+oKFWwiE5vJ3gneQgTcoSqiEUtiQDvmQEKkmAtmFzsaFFvmFRziRFzmRaZgnodeRnLiH74gYrPd5aziGbpgrsNdEIfmNhGg6dXhT8BZi1+WNOTiSN//ph8zkfJNzgErmjo3Yica4fj4pZtH4kvA4jvGniOBYiPyogQDZdpKIjstYj78YfFOJk304gKw4ixW4j04Zlv6ok5LFk81yg+1Hie+nlA4IlmPJlaP4j80HaTa5lf0okmQJOWYpO0VJaEc5iNM4l0PnlngJl1XpaFFpfVkZlIV5l9uHj5fEfn/5lPaXmCOoifUoi/G4b3WpllRpLOq4MWg5mWKZlJbJgYtpmC25g0P5f5IJlKq5hKlVXXwHaxsJeMw2eLYpmz0Ykb75m8AJnBqJkcZGnMU5nMiJm9IWhkN2kqm3mnZJjG/5ItHZYSm5KXCYmaR5INvBm+aonbBJnZ7/uXIgEAHmeZ4R8AD0+Jn2eJjimZM69wBz4YPQOZ6M6ZIv0p0YQxgR8JPg+Z/vCZ9kJ5+EcQFGGZ7S2ZjvqZ+dwgA08wEHCqBaaZ8B6p4VFwFEo54gl5ZxeZ+hwaCbcgFEQxF+iaBNOZ0LWi344gFcchk1AQEbup0KeqL3AaJeYjYVAaExaqJIiZ8hAqIsuiUuahEGuqMS6qHVSZ0gyik0058leqSxWaEzKn4QMDL+yZ4cqh0qSkkZMBca+qRYKqPIsaScIqJhsZ4T2qFRKqUCWpkQ8ABwGqcPUKQRGqY8mp9bellZWp9tQaaYaadQGqB+GoqAWqhVopzKpoTJiZC5/8mo9RWckBqpkop4i9qog1IAmJqpmrqpnIqpCdGpoAqqDoGonMeccSgfoZqqmvqpqqqqylKS48anWsplreqqAlCrqZoQJHCdAZCdVFlHqIqronqrwtqpCeEAt8KiHgCNlQis8VOsxkqs0LqqAoCsnxIBGdAbGbCtMHqlaTqrqjitm8qq4uqp1fopBLo6aIqkt7GlaVGumUqu5XqsnuKgiWMRIOCt7Nqn7soZ8Gqu/1oA9NopGHqvFtF3suqs7/qv8iquAxui3qE4FZEBdfqtCuuvDCut83qunPIBEfuiYGqxzfmsGRuwD0ugqoM3FhECIbuv4FqPJquxDsuxAVCwKa0rsURqpL86sgsLrw07rQ9rphVRNjXhpAHYrDyLsT4rs0BLszVrEVVTE62WsElrHTEbs9bKpCPzpUerjBdrtSWbsVm7KSh7phXrsl87Glcrtp8SAnPRrS1rmGlLHGu7tGO7KVXaFBSrr3JbtWobtnYbKxAwuIQ7uD7YtTt7qiS7tFjbmWoqqP0KtozLtoTpoxXSnT27sY1buW2KQ6RKecNZtxs7AoRnnBVpuvMVEAA7", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 517 + }, + "id": "PRV5gq1ZFr4S", + "outputId": "a6175d42-4094-4212-e6c3-bd2ca8d7c620" + }, + "outputs": [], "source": [ "import os\n", "\n", @@ -1187,11 +1110,6 @@ "metadata": { "accelerator": "GPU", "colab": { - "collapsed_sections": [ - "JaIw_5YaUSAB", - "IFraNFqY6s7_", - "4idyWUhW68oS" - ], "provenance": [] }, "kernelspec": { diff --git a/mava/__init__.py b/mava/__init__.py index 21db9ec1c..231c6f904 100644 --- a/mava/__init__.py +++ b/mava/__init__.py @@ -11,3 +11,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +__version__ = "0.2.0" diff --git a/mava/advanced_usage/ff_ippo_store_experience.py b/mava/advanced_usage/ff_ippo_store_experience.py index bd9b3c0e1..9546ddbb3 100644 --- a/mava/advanced_usage/ff_ippo_store_experience.py +++ b/mava/advanced_usage/ff_ippo_store_experience.py @@ -31,8 +31,8 @@ from rich.pretty import pprint from mava.evaluator import get_eval_fn, make_ff_eval_act_fn -from mava.networks.base import FeedForwardActor as Actor -from mava.networks.base import FeedForwardValueNet as Critic +from mava.networks import FeedForwardActor as Actor +from mava.networks import FeedForwardValueNet as Critic from mava.systems.ppo.types import LearnerState, OptStates, Params, PPOTransition from mava.types import ActorApply, CriticApply, ExperimentOutput, MarlEnv, MavaState from mava.utils.checkpointing import Checkpointer @@ -43,6 +43,7 @@ ) from mava.utils.logger import LogEvent, MavaLogger from mava.utils.make_env import make +from mava.utils.network_utils import get_action_head from mava.wrappers.episode_metrics import get_final_step_metrics StoreExpLearnerFn = Callable[[MavaState], Tuple[ExperimentOutput[MavaState], PPOTransition]] @@ -351,9 +352,8 @@ def learner_setup( n_devices = len(jax.devices()) # Get number of actions and agents. 
- num_actions = int(env.action_spec().num_values[0]) - num_agents = env.action_spec().shape[0] - config.system.num_agents = num_agents + num_actions = env.action_dim + config.system.num_agents = env.num_agents config.system.num_actions = num_actions # PRNG keys. @@ -361,7 +361,8 @@ def learner_setup( # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - actor_action_head = hydra.utils.instantiate(config.network.action_head, action_dim=num_actions) + action_head, _ = get_action_head(env) + actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) actor_network = Actor(torso=actor_torso, action_head=actor_action_head) @@ -547,11 +548,9 @@ def run_experiment(_config: DictConfig) -> None: def _reshape_experience(experience: Dict[str, chex.Array]) -> Dict[str, chex.Array]: """Reshape experience to match buffer.""" # Swap the T and NE axes (D, NU, UB, T, NE, ...) -> (D, NU, UB, NE, T, ...) - experience: Dict[str, chex.Array] = tree.map(lambda x: x.swapaxes(3, 4), experience) + experience = tree.map(lambda x: x.swapaxes(3, 4), experience) # Merge 4 leading dimensions into 1. (D, NU, UB, NE, T ...) -> (D * NU * UB * NE, T, ...) - experience: Dict[str, chex.Array] = tree.map( - lambda x: x.reshape(-1, *x.shape[4:]), experience - ) + experience = tree.map(lambda x: x.reshape(-1, *x.shape[4:]), experience) return experience # Use vault to record experience diff --git a/mava/configs/arch/anakin.yaml b/mava/configs/arch/anakin.yaml index 1c7041c58..b026cc90e 100644 --- a/mava/configs/arch/anakin.yaml +++ b/mava/configs/arch/anakin.yaml @@ -8,7 +8,7 @@ num_envs: 16 # Number of vectorised environments per device. evaluation_greedy: False # Evaluate the policy greedily. If True the policy will select # an action which corresponds to the greatest logit. If false, the policy will sample # from the logits. -num_evaluation: 200 # Number of evenly spaced evaluations to perform during training. +num_evaluation: 122 # Number of evenly spaced evaluations to perform during training. num_eval_episodes: 32 # Number of episodes to evaluate per evaluation. num_absolute_metric_eval_episodes: 320 # Number of episodes to evaluate the absolute metric (the final evaluation). absolute_metric: True # Whether the absolute metric should be computed. 
For more details diff --git a/mava/configs/default/ff_hasac.yaml b/mava/configs/default/ff_hasac.yaml new file mode 100644 index 000000000..36448357d --- /dev/null +++ b/mava/configs/default/ff_hasac.yaml @@ -0,0 +1,11 @@ +defaults: + - _self_ + - logger: logger + - arch: anakin + - system: sac/ff_hasac + - network: mlp # [mlp, cnn] + - env: mabrax # [mabrax] + +hydra: + searchpath: + - file://mava/configs diff --git a/mava/configs/default/ff_ippo.yaml b/mava/configs/default/ff_ippo.yaml index 1f3619a7d..5e2cd4dbf 100644 --- a/mava/configs/default/ff_ippo.yaml +++ b/mava/configs/default/ff_ippo.yaml @@ -2,7 +2,7 @@ defaults: - logger: logger - arch: anakin - system: ppo/ff_ippo - - network: mlp # [mlp, continuous_mlp, cnn] + - network: mlp # [mlp, cnn] - env: rware # [cleaner, connector, gigastep, lbf, mabrax, matrax, rware, smax] - _self_ diff --git a/mava/configs/default/ff_isac.yaml b/mava/configs/default/ff_isac.yaml index 73150ff31..c9ff0bb28 100644 --- a/mava/configs/default/ff_isac.yaml +++ b/mava/configs/default/ff_isac.yaml @@ -3,7 +3,7 @@ defaults: - logger: logger - arch: anakin - system: sac/ff_isac - - network: continuous_mlp # [continuous_mlp] + - network: mlp - env: mabrax # [mabrax] hydra: diff --git a/mava/configs/default/ff_mappo.yaml b/mava/configs/default/ff_mappo.yaml index 45c6bf2d9..76fd980c7 100644 --- a/mava/configs/default/ff_mappo.yaml +++ b/mava/configs/default/ff_mappo.yaml @@ -2,7 +2,7 @@ defaults: - logger: logger - arch: anakin - system: ppo/ff_mappo - - network: mlp # [mlp, continuous_mlp, cnn] + - network: mlp # [mlp, cnn] - env: rware # [cleaner, connector, gigastep, lbf, mabrax, matrax, rware, smax] - _self_ diff --git a/mava/configs/default/ff_masac.yaml b/mava/configs/default/ff_masac.yaml index 061f569f7..123cc6c67 100644 --- a/mava/configs/default/ff_masac.yaml +++ b/mava/configs/default/ff_masac.yaml @@ -3,7 +3,7 @@ defaults: - logger: logger - arch: anakin - system: sac/ff_masac - - network: continuous_mlp # [continuous_mlp] + - network: mlp - env: mabrax # [mabrax] hydra: diff --git a/mava/configs/default/ff_sable.yaml b/mava/configs/default/ff_sable.yaml new file mode 100644 index 000000000..bcf11797c --- /dev/null +++ b/mava/configs/default/ff_sable.yaml @@ -0,0 +1,11 @@ +defaults: + - logger: logger + - arch: anakin + - system: sable/ff_sable + - network: ff_retention + - env: rware # [cleaner, connector, gigastep, lbf, rware, smax] + - _self_ + +hydra: + searchpath: + - file://mava/configs diff --git a/mava/configs/default/mat.yaml b/mava/configs/default/mat.yaml new file mode 100644 index 000000000..393781c63 --- /dev/null +++ b/mava/configs/default/mat.yaml @@ -0,0 +1,11 @@ +defaults: + - logger: logger + - arch: anakin + - system: mat/mat + - network: transformer + - env: rware # [gigastep, lbf, mabrax, matrax, rware, smax] + - _self_ + +hydra: + searchpath: + - file://mava/configs diff --git a/mava/configs/default/rec_qmix.yaml b/mava/configs/default/rec_qmix.yaml new file mode 100644 index 000000000..305fa52e6 --- /dev/null +++ b/mava/configs/default/rec_qmix.yaml @@ -0,0 +1,11 @@ +defaults: + - _self_ + - logger: logger + - arch: anakin + - system: q_learning/rec_qmix + - network: qmix_rnn + - env: smax + +hydra: + searchpath: + - file://mava/configs diff --git a/mava/configs/default/rec_sable.yaml b/mava/configs/default/rec_sable.yaml new file mode 100644 index 000000000..7dbdbbbc8 --- /dev/null +++ b/mava/configs/default/rec_sable.yaml @@ -0,0 +1,11 @@ +defaults: + - logger: logger + - arch: anakin + - system: sable/rec_sable + - 
network: rec_retention + - env: rware # [cleaner, connector, gigastep, lbf, rware, smax] + - _self_ + +hydra: + searchpath: + - file://mava/configs diff --git a/mava/configs/env/scenario/large-4ag-hard.yaml b/mava/configs/env/scenario/large-4ag-hard.yaml new file mode 100644 index 000000000..68d5f4ff2 --- /dev/null +++ b/mava/configs/env/scenario/large-4ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the large-4ag-hard environment +name: RobotWarehouse-v0 +task_name: large-4ag-hard + +task_config: + column_height: 8 + shelf_rows: 3 + shelf_columns: 5 + num_agents: 4 + sensor_range: 1 + request_queue_size: 2 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/large-4ag.yaml b/mava/configs/env/scenario/large-4ag.yaml new file mode 100644 index 000000000..e15194e7d --- /dev/null +++ b/mava/configs/env/scenario/large-4ag.yaml @@ -0,0 +1,14 @@ +# The config of the large-4ag environment +name: RobotWarehouse-v0 +task_name: large-4ag + +task_config: + column_height: 8 + shelf_rows: 3 + shelf_columns: 5 + num_agents: 4 + sensor_range: 1 + request_queue_size: 4 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/large-8ag-hard.yaml b/mava/configs/env/scenario/large-8ag-hard.yaml new file mode 100644 index 000000000..336a0e02c --- /dev/null +++ b/mava/configs/env/scenario/large-8ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the large-8ag-hard environment +name: RobotWarehouse-v0 +task_name: large-8ag-hard + +task_config: + column_height: 8 + shelf_rows: 3 + shelf_columns: 5 + num_agents: 8 + sensor_range: 1 + request_queue_size: 4 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/large-8ag.yaml b/mava/configs/env/scenario/large-8ag.yaml new file mode 100644 index 000000000..0c3a50d1a --- /dev/null +++ b/mava/configs/env/scenario/large-8ag.yaml @@ -0,0 +1,14 @@ +# The config of the large-8ag environment +name: RobotWarehouse-v0 +task_name: large-8ag + +task_config: + column_height: 8 + shelf_rows: 3 + shelf_columns: 5 + num_agents: 8 + sensor_range: 1 + request_queue_size: 8 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/medium-4ag-hard.yaml b/mava/configs/env/scenario/medium-4ag-hard.yaml new file mode 100644 index 000000000..1f1ce70d0 --- /dev/null +++ b/mava/configs/env/scenario/medium-4ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the medium-4ag-hard environment +name: RobotWarehouse-v0 +task_name: medium-4ag-hard + +task_config: + column_height: 8 + shelf_rows: 2 + shelf_columns: 5 + num_agents: 4 + sensor_range: 1 + request_queue_size: 2 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/medium-4ag.yaml b/mava/configs/env/scenario/medium-4ag.yaml new file mode 100644 index 000000000..c0e4af2e5 --- /dev/null +++ b/mava/configs/env/scenario/medium-4ag.yaml @@ -0,0 +1,14 @@ +# The config of the medium-4ag environment +name: RobotWarehouse-v0 +task_name: medium-4ag + +task_config: + column_height: 8 + shelf_rows: 2 + shelf_columns: 5 + num_agents: 4 + sensor_range: 1 + request_queue_size: 4 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/medium-6ag.yaml b/mava/configs/env/scenario/medium-6ag.yaml new file mode 100644 index 000000000..e8ebb8803 --- /dev/null +++ b/mava/configs/env/scenario/medium-6ag.yaml @@ 
-0,0 +1,14 @@ +# The config of the medium-6ag environment +name: RobotWarehouse-v0 +task_name: medium-6ag + +task_config: + column_height: 8 + shelf_rows: 2 + shelf_columns: 5 + num_agents: 6 + sensor_range: 1 + request_queue_size: 6 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/small-4ag-hard.yaml b/mava/configs/env/scenario/small-4ag-hard.yaml new file mode 100644 index 000000000..6b5fddc1c --- /dev/null +++ b/mava/configs/env/scenario/small-4ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the small-4ag-hard environment +name: RobotWarehouse-v0 +task_name: small-4ag-hard + +task_config: + column_height: 8 + shelf_rows: 2 + shelf_columns: 3 + num_agents: 4 + sensor_range: 1 + request_queue_size: 2 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/tiny-2ag-hard.yaml b/mava/configs/env/scenario/tiny-2ag-hard.yaml new file mode 100644 index 000000000..12765c5d7 --- /dev/null +++ b/mava/configs/env/scenario/tiny-2ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the tiny-2ag-hard environment +name: RobotWarehouse-v0 +task_name: tiny-2ag-hard + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + num_agents: 2 + sensor_range: 1 + request_queue_size: 1 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/tiny-4ag-hard.yaml b/mava/configs/env/scenario/tiny-4ag-hard.yaml new file mode 100644 index 000000000..7f410186e --- /dev/null +++ b/mava/configs/env/scenario/tiny-4ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the tiny-4ag-hard environment +name: RobotWarehouse-v0 +task_name: tiny-4ag-hard + +task_config: + column_height: 8 + shelf_rows: 1 + shelf_columns: 3 + num_agents: 4 + sensor_range: 1 + request_queue_size: 2 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/xlarge-4ag-hard.yaml b/mava/configs/env/scenario/xlarge-4ag-hard.yaml new file mode 100644 index 000000000..94c005e3a --- /dev/null +++ b/mava/configs/env/scenario/xlarge-4ag-hard.yaml @@ -0,0 +1,14 @@ +# The config of the large-4ag-hard environment +name: RobotWarehouse-v0 +task_name: xlarge-4ag-hard + +task_config: + column_height: 8 + shelf_rows: 4 + shelf_columns: 5 + num_agents: 4 + sensor_range: 1 + request_queue_size: 2 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/scenario/xlarge-4ag.yaml b/mava/configs/env/scenario/xlarge-4ag.yaml new file mode 100644 index 000000000..7d8f0069f --- /dev/null +++ b/mava/configs/env/scenario/xlarge-4ag.yaml @@ -0,0 +1,14 @@ +# The config of the large-4ag environment +name: RobotWarehouse-v0 +task_name: xlarge-4ag + +task_config: + column_height: 8 + shelf_rows: 4 + shelf_columns: 5 + num_agents: 4 + sensor_range: 1 + request_queue_size: 4 + +env_kwargs: + {} # there are no scenario specific env_kwargs for this env diff --git a/mava/configs/env/vector-connector.yaml b/mava/configs/env/vector-connector.yaml new file mode 100644 index 000000000..647ddd9b9 --- /dev/null +++ b/mava/configs/env/vector-connector.yaml @@ -0,0 +1,21 @@ +# ---Environment Configs--- +defaults: + - _self_ + - scenario: con-5x5x3a # [con-5x5x3a, con-7x7x5a, con-10x10x10a, con-15x15x23a] +# Further environment config details in "con-10x10x5a" file. + +env_name: VectorMaConnector # Used for logging purposes. + +# Defines the metric that will be used to evaluate the performance of the agent. 
+# This metric is returned at the end of an experiment and can be used for hyperparameter tuning. +eval_metric: episode_return + +# Whether the environment observations encode implicit agent IDs. If True, the AgentID wrapper is not used. +# This is false since the vector observation wrapper for connector cannot encode Agent IDs by default. +implicit_agent_id: False +# Whether or not to log the winrate of this environment. This should not be changed as not all +# environments have a winrate metric. +log_win_rate: False + +kwargs: + {} # time limit set in scenario diff --git a/mava/configs/network/cnn.yaml b/mava/configs/network/cnn.yaml index 27031ec6c..f2a34aaa8 100644 --- a/mava/configs/network/cnn.yaml +++ b/mava/configs/network/cnn.yaml @@ -8,9 +8,6 @@ actor_network: use_layer_norm: False activation: relu -action_head: - _target_: mava.networks.heads.DiscreteActionHead # [DiscreteActionHead, ContinuousActionHead] - critic_network: pre_torso: _target_: mava.networks.torsos.CNNTorso diff --git a/mava/configs/network/continuous_mlp.yaml b/mava/configs/network/continuous_mlp.yaml deleted file mode 100644 index c26929566..000000000 --- a/mava/configs/network/continuous_mlp.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# ---MLP Networks--- -actor_network: - pre_torso: - _target_: mava.networks.torsos.MLPTorso - layer_sizes: [128, 128] - use_layer_norm: False - activation: relu - -action_head: - _target_: mava.networks.heads.ContinuousActionHead - -critic_network: - pre_torso: - _target_: mava.networks.torsos.MLPTorso - layer_sizes: [128, 128] - use_layer_norm: False - activation: relu diff --git a/mava/configs/network/ff_retention.yaml b/mava/configs/network/ff_retention.yaml new file mode 100644 index 000000000..1033f6730 --- /dev/null +++ b/mava/configs/network/ff_retention.yaml @@ -0,0 +1,10 @@ +# --- Retention for ff-Sable --- +net_config: + n_block: 1 # Number of blocks + embed_dim: 64 # Embedding dimension + n_head: 1 # Number of heads + +memory_config: + type: "ff_sable" # Type of the network. + agents_chunk_size: ~ # Size of the chunk: calculated over agents dim. This directly sets the sequence length for chunkwise retention + # If unspecified, the number of agents is used as the chunk size which means that we calculate full self-retention over all agents. diff --git a/mava/configs/network/mlp.yaml b/mava/configs/network/mlp.yaml index 943d3e690..c21dbbc80 100644 --- a/mava/configs/network/mlp.yaml +++ b/mava/configs/network/mlp.yaml @@ -6,9 +6,6 @@ actor_network: use_layer_norm: False activation: relu -action_head: - _target_: mava.networks.heads.DiscreteActionHead # [DiscreteActionHead, ContinuousActionHead] - critic_network: pre_torso: _target_: mava.networks.torsos.MLPTorso diff --git a/mava/configs/network/qmix_rnn.yaml b/mava/configs/network/qmix_rnn.yaml new file mode 100644 index 000000000..83cebb60e --- /dev/null +++ b/mava/configs/network/qmix_rnn.yaml @@ -0,0 +1,19 @@ +# ---Recurrent Structure Networks--- +hidden_state_dim: 256 # The size of the RNN hiddenstate for each agent. 
+ +q_network: + pre_torso: + _target_: mava.networks.torsos.MLPTorso + layer_sizes: [256] + use_layer_norm: False + activation: relu + post_torso: + _target_: mava.networks.torsos.MLPTorso + layer_sizes: [256] + use_layer_norm: False + activation: relu + +mixer_network: + _target_ : mava.networks.base.QMixingNetwork + hyper_hidden_dim: 64 + norm_env_states: True diff --git a/mava/configs/network/rcnn.yaml b/mava/configs/network/rcnn.yaml index 4024ab7fa..128e8fefd 100644 --- a/mava/configs/network/rcnn.yaml +++ b/mava/configs/network/rcnn.yaml @@ -15,9 +15,6 @@ actor_network: use_layer_norm: False activation: relu -action_head: - _target_: mava.networks.heads.DiscreteActionHead # [DiscreteActionHead, ContinuousActionHead] - critic_network: pre_torso: _target_: mava.networks.torsos.CNNTorso diff --git a/mava/configs/network/rec_retention.yaml b/mava/configs/network/rec_retention.yaml new file mode 100644 index 000000000..d6c0241d9 --- /dev/null +++ b/mava/configs/network/rec_retention.yaml @@ -0,0 +1,16 @@ +# --- Retention for Memory Sable --- +net_config: + n_block: 1 # Number of blocks + embed_dim: 64 # Embedding dimension + n_head: 1 # Number of heads + +memory_config: + type: "rec_sable" # Type of the network. + # --- Memory factor --- + decay_scaling_factor: 0.8 # Decay scaling factor for the kappa parameter: kappa = kappa * decay_scaling_factor + # --- Positional encoding --- + timestep_positional_encoding: False # Timestamp positional encoding for Sable memory. + # --- Chunking --- + timestep_chunk_size: ~ # Size of the chunk: calculated over timesteps dim. + # For example a chunksize of 2 results in a sequence length of 2 * num_agents because there num_agents observations within a timestep + # If unspecified, the rollout length is used as the chunk size which means that the entire rollout is computed in parallel during training. diff --git a/mava/configs/network/rnn.yaml b/mava/configs/network/rnn.yaml index e230d48fa..1ca30fb6f 100644 --- a/mava/configs/network/rnn.yaml +++ b/mava/configs/network/rnn.yaml @@ -13,9 +13,6 @@ actor_network: use_layer_norm: False activation: relu -action_head: - _target_: mava.networks.heads.DiscreteActionHead # [DiscreteActionHead, ContinuousActionHead] - critic_network: pre_torso: _target_: mava.networks.torsos.MLPTorso diff --git a/mava/configs/network/transformer.yaml b/mava/configs/network/transformer.yaml new file mode 100644 index 000000000..cc9deb44c --- /dev/null +++ b/mava/configs/network/transformer.yaml @@ -0,0 +1,6 @@ +# --- Network params --- +n_block: 1 # Transformer blocks +embed_dim: 64 # Transformer embedding dimension +n_head: 1 # Transformer heads +use_rmsnorm: False # Whether to use RMSNorm instead of LayerNorm +use_swiglu: False # Use SwiGLU instead of a 2-layer MLP for the feedforward networks diff --git a/mava/configs/system/mat/mat.yaml b/mava/configs/system/mat/mat.yaml new file mode 100644 index 000000000..6a16810d5 --- /dev/null +++ b/mava/configs/system/mat/mat.yaml @@ -0,0 +1,25 @@ +# --- Defaults MAT --- + +total_timesteps: ~ # Set the total environment steps. +# If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value. +num_updates: 1220 # Number of updates +seed: 42 + +# --- Agent observations --- +add_agent_id: True + +# --- RL hyperparameters --- +actor_lr: 0.0005 # Learning rate for actor network +update_batch_size: 2 # Number of vectorised gradient updates per device. +rollout_length: 128 # Number of environment steps per vectorised environment. 
+ppo_epochs: 5 # Number of PPO epochs per training data batch.
+num_minibatches: 1 # Number of minibatches per PPO epoch.
+gamma: 0.99 # Discounting factor.
+gae_lambda: 0.95 # Lambda value for GAE computation.
+clip_eps: 0.1 # Clipping value for PPO updates and value function.
+ent_coef: 0.01 # Entropy regularisation term for loss function.
+vf_coef: 0.5 # Critic weight in the total loss.
+max_grad_norm: 5 # Maximum norm of the gradients for a weight update.
+decay_learning_rates: False # Whether learning rates should be linearly decayed during training.
+
+normalise_value_targets: False
diff --git a/mava/configs/system/q_learning/rec_iql.yaml b/mava/configs/system/q_learning/rec_iql.yaml
index 6c41fd953..059c55dec 100644
--- a/mava/configs/system/q_learning/rec_iql.yaml
+++ b/mava/configs/system/q_learning/rec_iql.yaml
@@ -17,7 +17,7 @@ epochs: 2 # Number of learn epochs per training data batch.
 # sizes
 buffer_size: 5000 # size of the replay buffer. Note: total size is this * num_devices
 sample_batch_size: 32 # size of training data batch sampled from the buffer
-sample_sequence_length: 20 # 21 transitions are sampled, giving 20 complete data points
+sample_sequence_length: 20 # 20 transitions are sampled, giving 19 complete data points
 
 # learning rates
 q_lr: 3e-4 # the learning rate of the Q network network optimizer
diff --git a/mava/configs/system/q_learning/rec_qmix.yaml b/mava/configs/system/q_learning/rec_qmix.yaml
new file mode 100644
index 000000000..41019ec48
--- /dev/null
+++ b/mava/configs/system/q_learning/rec_qmix.yaml
@@ -0,0 +1,35 @@
+# --- Defaults REC-QMIX ---
+total_timesteps: ~ # Set the total environment steps.
+# If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value.
+num_updates: 10000 # Number of updates.
+seed: 42
+
+# --- Agent observations ---
+add_agent_id: True
+
+# --- RL hyperparameters ---
+min_buffer_size: 32
+update_batch_size: 1 # Number of vectorised gradient updates per device.
+
+rollout_length: 8 # Number of environment steps per vectorised environment.
+epochs: 4 # Number of learn epochs per training data batch.
+
+# sizes
+buffer_size: 1000 # size of the replay buffer. Note: total size is this * num_devices
+sample_batch_size: 128 # size of training data batch sampled from the buffer
+sample_sequence_length: 20 # 20 transitions are sampled, giving 19 complete data points
+
+# learning rates
+q_lr: 3e-5 # the learning rate of the Q network optimizer
+max_grad_norm: 10 # value used to clip gradients - set large for no clipping
+
+# Q Learning related
+hard_update: True
+update_period: 200
+tau: 0.01 # smoothing coefficient for target networks
+gamma: 0.99 # discount factor
+
+eps_min: 0.05
+eps_decay: 1e5
+
+qmix_embed_dim: 32
diff --git a/mava/configs/system/sable/ff_sable.yaml b/mava/configs/system/sable/ff_sable.yaml
new file mode 100644
index 000000000..b8579f1a7
--- /dev/null
+++ b/mava/configs/system/sable/ff_sable.yaml
@@ -0,0 +1,23 @@
+# --- Defaults ff-Sable ---
+
+total_timesteps: ~ # Set the total environment steps.
+# If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value.
+num_updates: 1000 # Number of updates
+seed: 42
+
+# --- Agent observations ---
+add_agent_id: True
+
+# --- RL hyperparameters ---
+actor_lr: 2.5e-4 # Learning rate for Sable network.
+update_batch_size: 2 # Number of vectorised gradient updates per device.
+rollout_length: 128 # Number of environment steps per vectorised environment.
+ppo_epochs: 4 # Number of PPO epochs per training data batch.
+num_minibatches: 2 # Number of minibatches per PPO epoch.
+gamma: 0.99 # Discounting factor.
+gae_lambda: 0.95 # Lambda value for GAE computation.
+clip_eps: 0.2 # Clipping value for PPO updates and value function.
+ent_coef: 0.01 # Entropy regularisation term for loss function.
+vf_coef: 0.5 # Critic weight in the total loss.
+max_grad_norm: 0.5 # Maximum norm of the gradients for a weight update.
+decay_learning_rates: False # Whether learning rates should be linearly decayed during training.
diff --git a/mava/configs/system/sable/rec_sable.yaml b/mava/configs/system/sable/rec_sable.yaml
new file mode 100644
index 000000000..86f47478b
--- /dev/null
+++ b/mava/configs/system/sable/rec_sable.yaml
@@ -0,0 +1,23 @@
+# --- Defaults Memory Sable ---
+
+total_timesteps: ~ # Set the total environment steps.
+# If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value.
+num_updates: 1000 # Number of updates
+seed: 42
+
+# --- Agent observations ---
+add_agent_id: True
+
+# --- RL hyperparameters ---
+actor_lr: 2.5e-4 # Learning rate for Sable network.
+update_batch_size: 2 # Number of vectorised gradient updates per device.
+rollout_length: 128 # Number of environment steps per vectorised environment.
+ppo_epochs: 4 # Number of PPO epochs per training data batch.
+num_minibatches: 2 # Number of minibatches per PPO epoch.
+gamma: 0.99 # Discounting factor.
+gae_lambda: 0.95 # Lambda value for GAE computation.
+clip_eps: 0.2 # Clipping value for PPO updates and value function.
+ent_coef: 0.01 # Entropy regularisation term for loss function.
+vf_coef: 0.5 # Critic weight in the total loss.
+max_grad_norm: 0.5 # Maximum norm of the gradients for a weight update.
+decay_learning_rates: False # Whether learning rates should be linearly decayed during training.
diff --git a/mava/configs/system/sac/ff_hasac.yaml b/mava/configs/system/sac/ff_hasac.yaml
new file mode 100644
index 000000000..afedc78bc
--- /dev/null
+++ b/mava/configs/system/sac/ff_hasac.yaml
@@ -0,0 +1,40 @@
+# --- Defaults FF-HASAC ---
+seed: 581744
+
+# --- Agent observations ---
+add_agent_id: False
+
+# --- RL hyperparameters ---
+# step related
+total_timesteps: ~ # Set the total environment steps.
+# If unspecified, it's derived from num_updates; otherwise, num_updates adjusts based on this value.
+num_updates: 8000 # Number of updates
+explore_steps: 5000 # number of steps to take with random actions at the start of training
+update_batch_size: 1 # number of vectorised gradient updates per device.
+
+rollout_length: 8 # number of environment steps per vectorised environment.
+epochs: 32 # number of learn epochs per training data batch.
+policy_update_delay: 2 # the delay before training the policy -
+# Every `policy_update_delay` q network learning steps the policy network is trained.
+# It is trained `policy_update_delay` times to compensate; this is a TD3 trick.
+
+# sizes
+buffer_size: 100000 # size of the replay buffer. Note: total size is this * num_devices
+batch_size: 64
+
+# learning rates
+policy_lr: 3e-4 # the learning rate of the policy network optimizer
+q_lr: 5e-4 # the learning rate of the Q network optimizer
+alpha_lr: 1e-3 # the learning rate of the alpha optimizer
+max_grad_norm: 10
+
+# SAC specific
+tau: 0.005 # smoothing coefficient for target networks
+gamma: 0.95 # discount factor
+
+autotune: False # whether to autotune alpha
+target_entropy_scale: 5.0 # scale factor for target entropy (when auto-tuning)
+init_alpha: 0.005 # initial entropy coefficient (alpha) when not using autotune
+
+# HASAC specific
+shuffle_agents: False # whether to shuffle agents during train time
diff --git a/mava/evaluator.py b/mava/evaluator.py
index 11a1f8f4a..21037c2c3 100644
--- a/mava/evaluator.py
+++ b/mava/evaluator.py
@@ -137,7 +137,7 @@ def _episode(key: PRNGKey, _: Any) -> Tuple[PRNGKey, Metrics]:
         env_state, ts = jax.vmap(env.reset)(reset_keys)
         step_state = env_state, ts, key, init_act_state
 
-        _, timesteps = jax.lax.scan(_env_step, step_state, jnp.arange(env.time_limit))
+        _, timesteps = jax.lax.scan(_env_step, step_state, jnp.arange(env.time_limit + 1))
 
         metrics = timesteps.extras["episode_metrics"]
         if config.env.log_win_rate:
@@ -155,7 +155,7 @@ def _episode(key: PRNGKey, _: Any) -> Tuple[PRNGKey, Metrics]:
         # So in evaluation we have num_envs parallel envs and loop enough times
         # so that we do at least `eval_episodes` number of episodes.
         _, metrics = jax.lax.scan(_episode, key, xs=None, length=episode_loops)
-        metrics: Metrics = tree.map(lambda x: x.reshape(-1), metrics)  # flatten metrics
+        metrics = tree.map(lambda x: x.reshape(-1), metrics)  # flatten metrics
         return metrics
 
     def timed_eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) -> Metrics:
@@ -163,7 +163,7 @@ def timed_eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState)
         start_time = time.time()
 
         metrics = jax.pmap(eval_fn)(params, key, init_act_state)
-        metrics: Metrics = jax.block_until_ready(metrics)
+        metrics = jax.block_until_ready(metrics)
         end_time = time.time()
 
         total_timesteps = jnp.sum(metrics["episode_length"])
diff --git a/mava/networks/__init__.py b/mava/networks/__init__.py
index 5fd984351..48c3f6f4d 100644
--- a/mava/networks/__init__.py
+++ b/mava/networks/__init__.py
@@ -22,3 +22,4 @@
     RecurrentValueNet,
     ScannedRNN,
 )
+from mava.networks.sable_network import SableNetwork
diff --git a/mava/networks/attention.py b/mava/networks/attention.py
new file mode 100644
index 000000000..0f5a477c6
--- /dev/null
+++ b/mava/networks/attention.py
@@ -0,0 +1,77 @@
+# Copyright 2022 InstaDeep Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
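+
+# Multi-head self-attention used by the MAT blocks in mava/networks/mat_network.py (added later in
+# this patch). When `masked` is set, a lower-triangular mask restricts each position to attend only
+# to itself and earlier agents, keeping the decoder's action prediction causal over the agent ordering.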
+ +import chex +import jax.numpy as jnp +from flax import linen as nn +from flax.linen.initializers import orthogonal + +# TODO: Use einops for all the reshapes and matrix multiplications + + +class SelfAttention(nn.Module): + embed_dim: int + n_head: int + n_agent: int + masked: bool = False + + def setup(self) -> None: + assert self.embed_dim % self.n_head == 0 + self.key = nn.Dense(self.embed_dim, kernel_init=orthogonal(0.01)) + self.query = nn.Dense(self.embed_dim, kernel_init=orthogonal(0.01)) + self.value = nn.Dense(self.embed_dim, kernel_init=orthogonal(0.01)) + + # output projection + self.proj = nn.Dense(self.embed_dim, kernel_init=orthogonal(0.01)) + + # causal mask to ensure that attention is only applied to the left in the input sequence + self.mask = jnp.tril(jnp.ones((self.n_agent + 1, self.n_agent + 1))) + self.mask = self.mask[jnp.newaxis, jnp.newaxis] + + def __call__(self, key: chex.Array, value: chex.Array, query: chex.Array) -> chex.Array: + # Shape names: + # B: batch size + # S: sequence length + # E: embedding dimension + # hs: head size + # nh: number of heads + + B, S, D = key.shape + + # calculate query, key, values for all heads in batch and move + # head forward to be the batch dim + # (B, S, E) -> (B, nh, S, hs) + k = self.key(key).reshape(B, S, self.n_head, D // self.n_head).transpose((0, 2, 1, 3)) + q = self.query(query).reshape(B, S, self.n_head, D // self.n_head).transpose((0, 2, 1, 3)) + v = self.value(value).reshape(B, S, self.n_head, D // self.n_head).transpose((0, 2, 1, 3)) + + # causal attention: (B, nh, S, hs) x (B, nh, hs, S) -> (B, nh, S, S) + att = jnp.matmul(q, k.transpose((0, 1, 3, 2))) * (1.0 / jnp.sqrt(k.shape[-1])) + + # mask out attention for all agents + if self.masked: + att = jnp.where( + self.mask[:, :, :S, :S] == 0, + jnp.finfo(jnp.float32).min, + att, + ) + + att = nn.softmax(att, axis=-1) + + y = jnp.matmul(att, v) # (B, nh, S, S) x (B, nh, S, hs) -> (B, nh, S, hs) + # re-assemble all head outputs side by side + y = y.transpose((0, 2, 1, 3)) + y = y.reshape(B, S, D) + + return self.proj(y) # (B, S, D) diff --git a/mava/networks/base.py b/mava/networks/base.py index f302c0198..b2096be3f 100644 --- a/mava/networks/base.py +++ b/mava/networks/base.py @@ -23,6 +23,7 @@ from flax.linen.initializers import orthogonal from mava.networks.distributions import MaskedEpsGreedyDistribution +from mava.networks.torsos import MLPTorso from mava.types import Observation, ObservationGlobalState, RNNGlobalObservation, RNNObservation @@ -232,3 +233,73 @@ def __call__( eps_greedy_dist = MaskedEpsGreedyDistribution(q_values, eps, obs.action_mask) return hidden_state, eps_greedy_dist + + +class QMixingNetwork(nn.Module): + num_actions: int + num_agents: int + hyper_hidden_dim: int = 64 + embed_dim: int = 32 + norm_env_states: bool = True + + def setup(self) -> None: + self.hyper_w1: MLPTorso = MLPTorso( + (self.hyper_hidden_dim, self.embed_dim * self.num_agents), + activate_final=False, + ) + + self.hyper_b1: MLPTorso = MLPTorso( + (self.embed_dim,), + activate_final=False, + ) + + self.hyper_w2: MLPTorso = MLPTorso( + (self.hyper_hidden_dim, self.embed_dim), + activate_final=False, + ) + + self.hyper_b2: MLPTorso = MLPTorso( + (self.embed_dim, 1), + activate_final=False, + ) + + self.layer_norm: nn.Module = nn.LayerNorm() + + @nn.compact + def __call__( + self, + agent_qs: chex.Array, + env_global_state: chex.Array, + ) -> chex.Array: + B, T = agent_qs.shape[:2] # batch size + + agent_qs = jnp.reshape(agent_qs, (B, T, 1, self.num_agents)) + + if 
self.norm_env_states: + states = self.layer_norm(env_global_state) + else: + states = env_global_state + + # First layer + w1 = jnp.abs(self.hyper_w1(states)) + b1 = self.hyper_b1(states) + w1 = jnp.reshape(w1, (B, T, self.num_agents, self.embed_dim)) + b1 = jnp.reshape(b1, (B, T, 1, self.embed_dim)) + + # Matrix multiplication + hidden = nn.elu(jnp.matmul(agent_qs, w1) + b1) + + # Second layer + w2 = jnp.abs(self.hyper_w2(states)) + b2 = self.hyper_b2(states) + + w2 = jnp.reshape(w2, (B, T, self.embed_dim, 1)) + b2 = jnp.reshape(b2, (B, T, 1, 1)) + + # Compute final output + y = jnp.matmul(hidden, w2) + b2 + + # Reshape + q_tot = jnp.reshape(y, (B, T, 1)) + + return q_tot diff --git a/mava/networks/mat_network.py b/mava/networks/mat_network.py new file mode 100644 index 000000000..0a6446e1b --- /dev/null +++ b/mava/networks/mat_network.py @@ -0,0 +1,279 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +import chex +import jax.numpy as jnp +from flax import linen as nn +from flax.linen.initializers import orthogonal + +from mava.networks.attention import SelfAttention +from mava.networks.torsos import SwiGLU +from mava.networks.utils.mat.decode import ( + continuous_autoregressive_act, + continuous_parallel_act, + discrete_autoregressive_act, + discrete_parallel_act, +) +from mava.systems.mat.types import MATNetworkConfig +from mava.types import MavaObservation +from mava.utils.network_utils import _CONTINUOUS, _DISCRETE + + +def _make_mlp(embed_dim: int, use_swiglu: bool) -> nn.Module: + if use_swiglu: + return SwiGLU(embed_dim, embed_dim) + + return nn.Sequential( + [ + nn.Dense(embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + nn.Dense(embed_dim, kernel_init=orthogonal(0.01)), + ], + ) + + +class EncodeBlock(nn.Module): + n_agent: int + net_config: MATNetworkConfig + masked: bool = False + + def setup(self) -> None: + ln = nn.RMSNorm if self.net_config.use_rmsnorm else nn.LayerNorm + self.ln1 = ln() + self.ln2 = ln() + + self.attn = SelfAttention( + self.net_config.embed_dim, self.net_config.n_head, self.n_agent, self.masked + ) + + self.mlp = _make_mlp(self.net_config.embed_dim, self.net_config.use_swiglu) + + def __call__(self, x: chex.Array) -> chex.Array: + x = self.ln1(x + self.attn(x, x, x)) + x = self.ln2(x + self.mlp(x)) + return x + + +class Encoder(nn.Module): + action_dim: int + n_agent: int + net_config: MATNetworkConfig + + def setup(self) -> None: + ln = nn.RMSNorm if self.net_config.use_rmsnorm else nn.LayerNorm + + self.obs_encoder = nn.Sequential( + [ + ln(), + nn.Dense(self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + ], + ) + self.ln = ln() + self.blocks = nn.Sequential( + [ + EncodeBlock( + self.n_agent, + self.net_config, + ) + for _ in range(self.net_config.n_block) + ] + ) + self.head = nn.Sequential( + [ + nn.Dense(self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + ln(), + nn.Dense(1, kernel_init=orthogonal(0.01)), + 
], + ) + + def __call__(self, obs: chex.Array) -> Tuple[chex.Array, chex.Array]: + obs_embeddings = self.obs_encoder(obs) + x = obs_embeddings + + rep = self.blocks(self.ln(x)) + value = self.head(rep) + + return jnp.squeeze(value, axis=-1), rep + + +class DecodeBlock(nn.Module): + n_agent: int + net_config: MATNetworkConfig + masked: bool = True + + def setup(self) -> None: + ln = nn.RMSNorm if self.net_config.use_rmsnorm else nn.LayerNorm + self.ln1 = ln() + self.ln2 = ln() + self.ln3 = ln() + + self.attn1 = SelfAttention( + self.net_config.embed_dim, self.net_config.n_head, self.n_agent, self.masked + ) + self.attn2 = SelfAttention( + self.net_config.embed_dim, self.net_config.n_head, self.n_agent, self.masked + ) + + self.mlp = _make_mlp(self.net_config.embed_dim, self.net_config.use_swiglu) + + def __call__(self, x: chex.Array, rep_enc: chex.Array) -> chex.Array: + x = self.ln1(x + self.attn1(x, x, x)) + x = self.ln2(rep_enc + self.attn2(key=x, value=x, query=rep_enc)) + x = self.ln3(x + self.mlp(x)) + return x + + +class Decoder(nn.Module): + action_dim: int + n_agent: int + action_space_type: str + net_config: MATNetworkConfig + + def setup(self) -> None: + ln = nn.RMSNorm if self.net_config.use_rmsnorm else nn.LayerNorm + + use_bias = self.action_space_type == _CONTINUOUS + self.action_encoder = nn.Sequential( + [ + nn.Dense( + self.net_config.embed_dim, + use_bias=use_bias, + kernel_init=orthogonal(jnp.sqrt(2)), + ), + nn.gelu, + ], + ) + + # Always initialize log_std but set to None for discrete action spaces + # This ensures the attribute exists but signals it should not be used. + self.log_std = ( + self.param("log_std", nn.initializers.zeros, (self.action_dim,)) + if self.action_space_type == _CONTINUOUS + else None + ) + + self.obs_encoder = nn.Sequential( + [ + ln(), + nn.Dense(self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + ], + ) + self.ln = ln() + self.blocks = [ + DecodeBlock( + self.n_agent, + self.net_config, + name=f"cross_attention_block_{block_id}", + ) + for block_id in range(self.net_config.n_block) + ] + self.head = nn.Sequential( + [ + nn.Dense(self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + ln(), + nn.Dense(self.action_dim, kernel_init=orthogonal(0.01)), + ], + ) + + def __call__(self, action: chex.Array, obs_rep: chex.Array) -> chex.Array: + action_embeddings = self.action_encoder(action) + x = self.ln(action_embeddings) + + # Need to loop here because the input and output of the blocks are different. + # Blocks take an action embedding and observation encoding as input but only give the cross + # attention output as output. 
+ for block in self.blocks: + x = block(x, obs_rep) + logit = self.head(x) + + return logit + + +class MultiAgentTransformer(nn.Module): + action_dim: int + n_agent: int + net_config: MATNetworkConfig + action_space_type: str = _DISCRETE + + # General shape names: + # B: batch size + # N: number of agents + # O: observation dimension + # A: action dimension + # E: model embedding dimension + + def setup(self) -> None: + if self.action_space_type not in [_DISCRETE, _CONTINUOUS]: + raise ValueError(f"Invalid action space type: {self.action_space_type}") + + self.encoder = Encoder( + self.action_dim, + self.n_agent, + self.net_config, + ) + self.decoder = Decoder( + self.action_dim, + self.n_agent, + self.action_space_type, + self.net_config, + ) + + if self.action_space_type == _DISCRETE: + self.act_function = discrete_autoregressive_act + self.train_function = discrete_parallel_act + elif self.action_space_type == _CONTINUOUS: + self.act_function = continuous_autoregressive_act + self.train_function = continuous_parallel_act + else: + raise ValueError(f"Invalid action space type: {self.action_space_type}") + + def __call__( + self, + observation: MavaObservation, # (B, N, ...) + action: chex.Array, # (B, N, A) + key: chex.PRNGKey, + ) -> Tuple[chex.Array, chex.Array, chex.Array]: + value, obs_rep = self.encoder(observation.agents_view) + + action_log, entropy = self.train_function( + decoder=self.decoder, + obs_rep=obs_rep, + action=action, + action_dim=self.action_dim, + legal_actions=observation.action_mask, + key=key, + ) + + return action_log, value, entropy + + def get_actions( + self, + observation: MavaObservation, # (B, N, ...) + key: chex.PRNGKey, + ) -> Tuple[chex.Array, chex.Array, chex.Array]: + value, obs_rep = self.encoder(observation.agents_view) + output_action, output_action_log = self.act_function( + decoder=self.decoder, + obs_rep=obs_rep, + action_dim=self.action_dim, + legal_actions=observation.action_mask, + key=key, + ) + return output_action, output_action_log, value diff --git a/mava/networks/retention.py b/mava/networks/retention.py new file mode 100644 index 000000000..a041abf33 --- /dev/null +++ b/mava/networks/retention.py @@ -0,0 +1,323 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +from chex import Array +from omegaconf import DictConfig + +from mava.networks.utils.sable import PositionalEncoding + +# General shapes legend: +# B: batch size +# N: number of agents +# S: sequence length +# C: chunk size - T * N in a chunk +# T: number of timesteps + + +class SimpleRetention(nn.Module): + """Simple retention mechanism for Sable. 
+ + Note: + This retention mechanism implementation is based on the following code: + https://github.com/Jamie-Stirling/RetNet/blob/main/src/retention.py + """ + + embed_dim: int + head_size: int + n_agents: int + masked: bool + decay_kappa: float # this is gamma in the original retention implementation + memory_config: DictConfig + + def setup(self) -> None: + # Initialise the weights + self.w_q = self.param( + "w_q", + nn.initializers.normal(stddev=1 / self.embed_dim), + (self.embed_dim, self.head_size), + ) + self.w_k = self.param( + "w_k", + nn.initializers.normal(stddev=1 / self.embed_dim), + (self.embed_dim, self.head_size), + ) + self.w_v = self.param( + "w_v", + nn.initializers.normal(stddev=1 / self.embed_dim), + (self.embed_dim, self.head_size), + ) + + def __call__( + self, key: Array, query: Array, value: Array, hstate: Array, dones: Array + ) -> Tuple[Array, Array]: + """Chunkwise (default) representation of the retention mechanism.""" + B, C, _ = value.shape + + # Apply projection to q_proj, k_proj, v_proj + q_proj = query @ self.w_q + k_proj = key @ self.w_k + v_proj = value @ self.w_v + k_proj = k_proj.transpose(0, -1, -2) + + # Compute next hidden state + if self.memory_config.type == "ff_sable": + # No decay matrix or xi for FF Sable since we don't have temporal dependencies. + decay_matrix = jnp.ones((B, C, C)) + decay_matrix = self._causal_mask(decay_matrix) + xi = jnp.ones((B, C, 1)) + next_hstate = (k_proj @ v_proj) + hstate + else: + decay_matrix = self.get_decay_matrix(dones) + xi = self.get_xi(dones) + chunk_decay = self.decay_kappa ** (C // self.n_agents) + delta = ~jnp.any(dones[:, :: self.n_agents], axis=1)[:, jnp.newaxis, jnp.newaxis] + next_hstate = ( + k_proj @ (v_proj * decay_matrix[:, -1].reshape((B, C, 1))) + ) + hstate * chunk_decay * delta + + # Compute the inner chunk and cross chunk + cross_chunk = (q_proj @ hstate) * xi + inner_chunk = ((q_proj @ k_proj) * decay_matrix) @ v_proj + + # Compute the final retention + ret = inner_chunk + cross_chunk + return ret, next_hstate + + def recurrent( + self, key_n: Array, query_n: Array, value_n: Array, hstate: Array + ) -> Tuple[Array, Array]: + """Recurrent representation of the retention mechanism.""" + # Apply projection to q_proj, k_proj, v_proj + q_proj = query_n @ self.w_q + k_proj = key_n @ self.w_k + v_proj = value_n @ self.w_v + + # Apply the retention mechanism and update the hidden state + updated_hstate = hstate + (k_proj.transpose(0, -1, -2) @ v_proj) + ret = q_proj @ updated_hstate + + return ret, updated_hstate + + def get_decay_matrix(self, dones: Array) -> Array: + """Get the decay matrix for the full sequence based on the dones and retention type.""" + # Extract done information at the timestep level + timestep_dones = dones[:, :: self.n_agents] # B, T + + # B, T, T + timestep_mask = self._get_decay_matrix_mask_timestep(timestep_dones) + decay_matrix = self._get_default_decay_matrix(timestep_dones) + decay_matrix *= timestep_mask + + # B, T, T -> B, T * N, T * N + decay_matrix = jnp.repeat( + jnp.repeat(decay_matrix, self.n_agents, axis=1), self.n_agents, axis=2 + ) + + # Apply a causal mask over agents if full self-retention is disabled + # This converts it from a blocked decay matrix to a causal decay matrix + decay_matrix = self._causal_mask(decay_matrix) + + return decay_matrix + + def _causal_mask(self, matrix: Array) -> Array: + """Applies a causal mask to the input matrix if `masked` is True.""" + if self.masked: + mask_agents = jnp.tril(jnp.ones((matrix.shape[1], matrix.shape[1]))) + 
matrix = mask_agents[None, :, :] * matrix + return matrix + + def _get_decay_matrix_mask_timestep(self, ts_dones: Array) -> Array: + """Generates a mask over the timesteps based on the done status of agents. + + If there is a termination on timestep t, then the decay matrix should be + restarted from index (t, t). See the section Adapting the decay matrix for MARL + for a full explanation: https://arxiv.org/pdf/2410.01706 + """ + # Get the shape of the input: batch size and number of timesteps + B, T = ts_dones.shape + + # Initialise the mask + timestep_mask = jnp.zeros((B, T, T), dtype=bool) + all_false = jnp.zeros((B, T, T), dtype=bool) + + # Iterate over the timesteps and apply the mask + for i in range(T): + done_this_step = ts_dones[:, i, jnp.newaxis, jnp.newaxis] + ts_done_xs = all_false.at[:, i:, :].set(done_this_step) + ts_done_ys = all_false.at[:, :, :i].set(done_this_step) + + # Combine the x and y masks to get the mask for the current timestep. + timestep_mask |= ts_done_xs & ts_done_ys + + return ~timestep_mask + + def _get_default_decay_matrix(self, dones: Array) -> Array: + """Compute the decay matrix without taking into account the timestep-based masking.""" + # Get the shape of the input: batch size and number of timesteps + B, T = dones.shape + + # Create the n and m matrices + n = jnp.arange(T)[:, jnp.newaxis, ...] + m = jnp.arange(T)[jnp.newaxis, ...] + + # Decay based on difference in timestep indices. + decay_matrix = (self.decay_kappa ** (n - m)) * (n >= m) + # Replace NaN values with 0 + decay_matrix = jnp.nan_to_num(decay_matrix) + + # Adjust for batch size + decay_matrix = jnp.broadcast_to(decay_matrix, (B, T, T)) + + return decay_matrix + + def get_xi(self, dones: Array) -> Array: + """Computes a decaying matrix 'xi', which decays over time until the first done signal.""" + # Get done status for each timestep by slicing out the agent dimension + timestep_dones = dones[:, :: self.n_agents] + B, T = timestep_dones.shape + + # Compute the first done step for each sequence, + # or set it to sequence length if no dones exist + first_dones = jnp.where( + ~jnp.any(timestep_dones, axis=1, keepdims=True), + jnp.full((B, 1), T), + jnp.argmax(timestep_dones, axis=1, keepdims=True), + ) + + xi = jnp.zeros((B, T, 1)) + # Fill 'xi' with decaying values up until the first done step + for i in range(T): + before_first_done = i < first_dones + xi_i = (self.decay_kappa ** (i + 1)) * before_first_done + xi = xi.at[:, i, :].set(xi_i) + + # Repeat the decay matrix 'xi' for all agents + xi = jnp.repeat(xi, self.n_agents, axis=1) + + return xi + + +class MultiScaleRetention(nn.Module): + """Multi-scale retention mechanism for Sable.""" + + embed_dim: int + n_head: int + n_agents: int + memory_config: DictConfig + masked: bool = True + decay_scaling_factor: float = 1.0 + + def setup(self) -> None: + assert self.embed_dim % self.n_head == 0, "embed_dim must be divisible by n_head" + self.head_size = self.embed_dim // self.n_head + + # Decay kappa for each head + self.decay_kappas = 1 - jnp.exp( + jnp.linspace(jnp.log(1 / 32), jnp.log(1 / 512), self.n_head) + ) + self.decay_kappas = self.decay_kappas * self.decay_scaling_factor + + # Initialise the weights and group norm + self.w_g = self.param( + "w_g", + nn.initializers.normal(stddev=1 / self.embed_dim), + (self.embed_dim, self.head_size), + ) + self.w_o = self.param( + "w_o", + nn.initializers.normal(stddev=1 / self.embed_dim), + (self.head_size, self.embed_dim), + ) + self.group_norm = nn.GroupNorm(num_groups=self.n_head) + + # 
Initialise the retention mechanisms + self.retention_heads = [ + SimpleRetention( + self.embed_dim, + self.head_size, + self.n_agents, + self.masked, + decay_kappa, + self.memory_config, + ) + for decay_kappa in self.decay_kappas + ] + + # Create an instance of the positional encoding + self.pe = PositionalEncoding(self.embed_dim) + + def __call__( + self, + key: Array, + query: Array, + value: Array, + hstate: Array, + dones: Array, + step_count: Array, + ) -> Tuple[Array, Array]: + """Chunkwise (default) representation of the multi-scale retention mechanism""" + B, C, _ = value.shape + + # Positional encoding of the current step + if self.memory_config.timestep_positional_encoding: + key, query, value = self.pe(key, query, value, step_count) + + ret_output = jnp.zeros((B, C, self.head_size), dtype=value.dtype) + for head in range(self.n_head): + y, new_hs = self.retention_heads[head](key, query, value, hstate[:, head], dones) + ret_output = ret_output.at[ + :, :, self.head_size * head : self.head_size * (head + 1) + ].set(y) + hstate = hstate.at[:, head, :, :].set(new_hs) + + ret_output = self.group_norm(ret_output.reshape(-1, self.head_size)).reshape( + ret_output.shape + ) + + x = key + output = (jax.nn.swish(x @ self.w_g) * ret_output) @ self.w_o + return output, hstate + + def recurrent( + self, key_n: Array, query_n: Array, value_n: Array, hstate: Array, step_count: Array + ) -> Tuple[Array, Array]: + """Recurrent representation of the multi-scale retention mechanism""" + B, S, _ = value_n.shape + + # Positional encoding of the current step if enabled + if self.memory_config.timestep_positional_encoding: + key_n, query_n, value_n = self.pe(key_n, query_n, value_n, step_count) + + ret_output = jnp.zeros((B, S, self.head_size), dtype=value_n.dtype) + for head in range(self.n_head): + y, new_hs = self.retention_heads[head].recurrent( + key_n, query_n, value_n, hstate[:, head] + ) + ret_output = ret_output.at[ + :, :, self.head_size * head : self.head_size * (head + 1) + ].set(y) + hstate = hstate.at[:, head, :, :].set(new_hs) + + ret_output = self.group_norm(ret_output.reshape(-1, self.head_size)).reshape( + ret_output.shape + ) + + x = key_n + output = (jax.nn.swish(x @ self.w_g) * ret_output) @ self.w_o + return output, hstate diff --git a/mava/networks/sable_network.py b/mava/networks/sable_network.py new file mode 100644 index 000000000..e626bfc16 --- /dev/null +++ b/mava/networks/sable_network.py @@ -0,0 +1,473 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
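+
+# Sable network: a retention-based encoder-decoder. The encoder maps observations to a value and a
+# per-agent representation using chunkwise retention, the decoder predicts actions autoregressively
+# over agents, and the retention hidden states carry memory across timesteps (they are decayed once
+# per environment step in `get_actions`).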
+ +from functools import partial +from typing import Optional, Tuple + +import chex +import jax.numpy as jnp +from flax import linen as nn +from flax.linen.initializers import orthogonal +from jax import tree +from omegaconf import DictConfig + +from mava.networks.retention import MultiScaleRetention +from mava.networks.torsos import SwiGLU +from mava.networks.utils.sable import ( + act_encoder_fn, + autoregressive_act, + train_decoder_fn, + train_encoder_fn, +) +from mava.systems.sable.types import HiddenStates, SableNetworkConfig +from mava.types import Observation +from mava.utils.network_utils import _CONTINUOUS, _DISCRETE + + +class EncodeBlock(nn.Module): + """Sable encoder block.""" + + net_config: SableNetworkConfig + memory_config: DictConfig + n_agents: int + + def setup(self) -> None: + self.ln1 = nn.RMSNorm() + self.ln2 = nn.RMSNorm() + + self.retn = MultiScaleRetention( + embed_dim=self.net_config.embed_dim, + n_head=self.net_config.n_head, + n_agents=self.n_agents, + masked=False, # Full retention for the encoder + memory_config=self.memory_config, + decay_scaling_factor=self.memory_config.decay_scaling_factor, + ) + + self.ffn = SwiGLU(self.net_config.embed_dim, self.net_config.embed_dim) + + def __call__( + self, x: chex.Array, hstate: chex.Array, dones: chex.Array, step_count: chex.Array + ) -> chex.Array: + """Applies Chunkwise MultiScaleRetention.""" + ret, updated_hstate = self.retn( + key=x, query=x, value=x, hstate=hstate, dones=dones, step_count=step_count + ) + x = self.ln1(x + ret) + output = self.ln2(x + self.ffn(x)) + return output, updated_hstate + + def recurrent(self, x: chex.Array, hstate: chex.Array, step_count: chex.Array) -> chex.Array: + """Applies Recurrent MultiScaleRetention.""" + ret, updated_hstate = self.retn.recurrent( + key_n=x, query_n=x, value_n=x, hstate=hstate, step_count=step_count + ) + x = self.ln1(x + ret) + output = self.ln2(x + self.ffn(x)) + return output, updated_hstate + + +class Encoder(nn.Module): + """Multi-block encoder consisting of multiple `EncoderBlock` modules.""" + + net_config: SableNetworkConfig + memory_config: DictConfig + n_agents: int + + def setup(self) -> None: + self.ln = nn.RMSNorm() + + self.obs_encoder = nn.Sequential( + [ + nn.RMSNorm(), + nn.Dense( + self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2)), use_bias=False + ), + nn.gelu, + ], + ) + self.head = nn.Sequential( + [ + nn.Dense(self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + nn.RMSNorm(), + nn.Dense(1, kernel_init=orthogonal(0.01)), + ], + ) + + self.blocks = [ + EncodeBlock( + self.net_config, + self.memory_config, + self.n_agents, + name=f"encoder_block_{block_id}", + ) + for block_id in range(self.net_config.n_block) + ] + + def __call__( + self, obs: chex.Array, hstate: chex.Array, dones: chex.Array, step_count: chex.Array + ) -> Tuple[chex.Array, chex.Array, chex.Array]: + """Apply chunkwise encoding.""" + updated_hstate = jnp.zeros_like(hstate) + obs_rep = self.obs_encoder(obs) + + # Apply the encoder blocks + for i, block in enumerate(self.blocks): + hs = hstate[:, :, i] # Get the hidden state for the current block + # Apply the chunkwise encoder block + obs_rep, hs_new = block(self.ln(obs_rep), hs, dones, step_count) + updated_hstate = updated_hstate.at[:, :, i].set(hs_new) + + value = self.head(obs_rep) + + return value, obs_rep, updated_hstate + + def recurrent( + self, obs: chex.Array, hstate: chex.Array, step_count: chex.Array + ) -> Tuple[chex.Array, chex.Array, chex.Array]: + """Apply recurrent 
encoding.""" + updated_hstate = jnp.zeros_like(hstate) + obs_rep = self.obs_encoder(obs) + + # Apply the encoder blocks + for i, block in enumerate(self.blocks): + hs = hstate[:, :, i] # Get the hidden state for the current block + # Apply the recurrent encoder block + obs_rep, hs_new = block.recurrent(self.ln(obs_rep), hs, step_count) + updated_hstate = updated_hstate.at[:, :, i].set(hs_new) + + # Compute the value function + value = self.head(obs_rep) + + return value, obs_rep, updated_hstate + + +class DecodeBlock(nn.Module): + """Sable decoder block.""" + + net_config: SableNetworkConfig + memory_config: DictConfig + n_agents: int + + def setup(self) -> None: + self.ln1, self.ln2, self.ln3 = nn.RMSNorm(), nn.RMSNorm(), nn.RMSNorm() + + self.retn1 = MultiScaleRetention( + embed_dim=self.net_config.embed_dim, + n_head=self.net_config.n_head, + n_agents=self.n_agents, + masked=True, # Masked retention for the decoder + memory_config=self.memory_config, + decay_scaling_factor=self.memory_config.decay_scaling_factor, + ) + self.retn2 = MultiScaleRetention( + embed_dim=self.net_config.embed_dim, + n_head=self.net_config.n_head, + n_agents=self.n_agents, + masked=True, # Masked retention for the decoder + memory_config=self.memory_config, + decay_scaling_factor=self.memory_config.decay_scaling_factor, + ) + + self.ffn = SwiGLU(self.net_config.embed_dim, self.net_config.embed_dim) + + def __call__( + self, + x: chex.Array, + obs_rep: chex.Array, + hstates: Tuple[chex.Array, chex.Array], + dones: chex.Array, + step_count: chex.Array, + ) -> Tuple[chex.Array, Tuple[chex.Array, chex.Array]]: + """Applies Chunkwise MultiScaleRetention.""" + hs1, hs2 = hstates + + # Apply the self-retention over actions + ret, hs1_new = self.retn1( + key=x, query=x, value=x, hstate=hs1, dones=dones, step_count=step_count + ) + ret = self.ln1(x + ret) + + # Apply the cross-retention over obs x action + ret2, hs2_new = self.retn2( + key=ret, + query=obs_rep, + value=ret, + hstate=hs2, + dones=dones, + step_count=step_count, + ) + y = self.ln2(obs_rep + ret2) + output = self.ln3(y + self.ffn(y)) + + return output, (hs1_new, hs2_new) + + def recurrent( + self, + x: chex.Array, + obs_rep: chex.Array, + hstates: Tuple[chex.Array, chex.Array], + step_count: chex.Array, + ) -> Tuple[chex.Array, Tuple[chex.Array, chex.Array]]: + """Applies Recurrent MultiScaleRetention.""" + hs1, hs2 = hstates + + # Apply the self-retention over actions + ret, hs1_new = self.retn1.recurrent( + key_n=x, query_n=x, value_n=x, hstate=hs1, step_count=step_count + ) + ret = self.ln1(x + ret) + + # Apply the cross-retention over obs x action + ret2, hs2_new = self.retn2.recurrent( + key_n=ret, query_n=obs_rep, value_n=ret, hstate=hs2, step_count=step_count + ) + y = self.ln2(obs_rep + ret2) + output = self.ln3(y + self.ffn(y)) + + return output, (hs1_new, hs2_new) + + +class Decoder(nn.Module): + """Multi-block decoder consisting of multiple `DecoderBlock` modules.""" + + net_config: SableNetworkConfig + memory_config: DictConfig + n_agents: int + action_dim: int + action_space_type: str = _DISCRETE + + def setup(self) -> None: + self.ln = nn.RMSNorm() + + use_bias = self.action_space_type == _CONTINUOUS + self.action_encoder = nn.Sequential( + [ + nn.Dense( + self.net_config.embed_dim, + use_bias=use_bias, + kernel_init=orthogonal(jnp.sqrt(2)), + ), + nn.gelu, + ], + ) + + # Always initialize log_std but set to None for discrete action spaces + # This ensures the attribute exists but signals it should not be used. 
+ self.log_std = ( + self.param("log_std", nn.initializers.zeros, (self.action_dim,)) + if self.action_space_type == _CONTINUOUS + else None + ) + + self.head = nn.Sequential( + [ + nn.Dense(self.net_config.embed_dim, kernel_init=orthogonal(jnp.sqrt(2))), + nn.gelu, + nn.RMSNorm(), + nn.Dense(self.action_dim, kernel_init=orthogonal(0.01)), + ], + ) + + self.blocks = [ + DecodeBlock( + self.net_config, + self.memory_config, + self.n_agents, + name=f"decoder_block_{block_id}", + ) + for block_id in range(self.net_config.n_block) + ] + + def __call__( + self, + action: chex.Array, + obs_rep: chex.Array, + hstates: Tuple[chex.Array, chex.Array], + dones: chex.Array, + step_count: chex.Array, + ) -> Tuple[chex.Array, Tuple[chex.Array, chex.Array]]: + """Apply chunkwise decoding.""" + updated_hstates = tree.map(jnp.zeros_like, hstates) + action_embeddings = self.action_encoder(action) + x = self.ln(action_embeddings) + + # Apply the decoder blocks + for i, block in enumerate(self.blocks): + hs = tree.map(lambda x, j=i: x[:, :, j], hstates) + x, hs_new = block(x=x, obs_rep=obs_rep, hstates=hs, dones=dones, step_count=step_count) + updated_hstates = tree.map( + lambda x, y, j=i: x.at[:, :, j].set(y), updated_hstates, hs_new + ) + + logit = self.head(x) + + return logit, updated_hstates + + def recurrent( + self, + action: chex.Array, + obs_rep: chex.Array, + hstates: Tuple[chex.Array, chex.Array], + step_count: chex.Array, + ) -> Tuple[chex.Array, Tuple[chex.Array, chex.Array]]: + """Apply recurrent decoding.""" + updated_hstates = tree.map(jnp.zeros_like, hstates) + action_embeddings = self.action_encoder(action) + x = self.ln(action_embeddings) + + # Apply the decoder blocks + for i, block in enumerate(self.blocks): + hs = tree.map(lambda x, i=i: x[:, :, i], hstates) + x, hs_new = block.recurrent(x=x, obs_rep=obs_rep, hstates=hs, step_count=step_count) + updated_hstates = tree.map( + lambda x, y, j=i: x.at[:, :, j].set(y), updated_hstates, hs_new + ) + + logit = self.head(x) + + return logit, updated_hstates + + +class SableNetwork(nn.Module): + """Sable network module.""" + + n_agents: int + n_agents_per_chunk: int + action_dim: int + net_config: SableNetworkConfig + memory_config: DictConfig + action_space_type: str = _DISCRETE + + def setup(self) -> None: + if self.action_space_type not in [_DISCRETE]: + raise ValueError(f"Invalid action space type: {self.action_space_type}") + + assert ( + self.memory_config.decay_scaling_factor >= 0 + and self.memory_config.decay_scaling_factor <= 1 + ), "Decay scaling factor should be between 0 and 1" + + # Decay kappa for each head + self.decay_kappas = 1 - jnp.exp( + jnp.linspace(jnp.log(1 / 32), jnp.log(1 / 512), self.net_config.n_head) + ) + self.decay_kappas = self.decay_kappas * self.memory_config.decay_scaling_factor + self.decay_kappas = self.decay_kappas[None, :, None, None, None] + + self.encoder = Encoder( + self.net_config, + self.memory_config, + self.n_agents_per_chunk, + ) + self.decoder = Decoder( + self.net_config, + self.memory_config, + self.n_agents_per_chunk, + self.action_dim, + self.action_space_type, + ) + + # Set the actor and trainer functions + self.train_encoder_fn = partial( + train_encoder_fn, + chunk_size=self.memory_config.chunk_size, + ) + self.train_decoder_fn = partial( + train_decoder_fn, n_agents=self.n_agents, chunk_size=self.memory_config.chunk_size + ) + + self.act_encoder_fn = partial( + act_encoder_fn, + chunk_size=self.n_agents_per_chunk, + ) + self.autoregressive_act = autoregressive_act + + def __call__( + self, 
+ observation: Observation, + action: chex.Array, + hstates: HiddenStates, + dones: chex.Array, + rng_key: Optional[chex.PRNGKey] = None, + ) -> Tuple[chex.Array, chex.Array, chex.Array]: + """Training phase.""" + obs, legal_actions, step_count = ( + observation.agents_view, + observation.action_mask, + observation.step_count, + ) + value, obs_rep, _ = self.train_encoder_fn( + encoder=self.encoder, obs=obs, hstate=hstates[0], dones=dones, step_count=step_count + ) + + action_log, entropy = self.train_decoder_fn( + decoder=self.decoder, + obs_rep=obs_rep, + action=action, + legal_actions=legal_actions, + hstates=hstates[1:], + dones=dones, + step_count=step_count, + rng_key=rng_key, + ) + + action_log = jnp.squeeze(action_log, axis=-1) + value = jnp.squeeze(value, axis=-1) + entropy = jnp.squeeze(entropy, axis=-1) + return value, action_log, entropy + + def get_actions( + self, + observation: Observation, + hstates: HiddenStates, + key: chex.PRNGKey, + ) -> Tuple[chex.Array, chex.Array, chex.Array, HiddenStates]: + """Inference phase.""" + obs, legal_actions, step_count = ( + observation.agents_view, + observation.action_mask, + observation.step_count, + ) + + # Decay the hidden states: each timestep we decay the hidden states once + decayed_hstates = tree.map(lambda x: x * self.decay_kappas, hstates) + + value, obs_rep, updated_enc_hs = self.act_encoder_fn( + encoder=self.encoder, + obs=obs, + decayed_hstate=decayed_hstates[0], + step_count=step_count, + ) + + output_actions, output_actions_log, updated_dec_hs = self.autoregressive_act( + decoder=self.decoder, + obs_rep=obs_rep, + legal_actions=legal_actions, + hstates=decayed_hstates[1:], + step_count=step_count, + key=key, + ) + + updated_hs = HiddenStates( + encoder=updated_enc_hs, + decoder_self_retn=updated_dec_hs[0], + decoder_cross_retn=updated_dec_hs[1], + ) + + output_actions = jnp.squeeze(output_actions, axis=-1) + output_actions_log = jnp.squeeze(output_actions_log, axis=-1) + value = jnp.squeeze(value, axis=-1) + return output_actions, output_actions_log, value, updated_hs diff --git a/mava/networks/torsos.py b/mava/networks/torsos.py index e8a40297d..7602fb245 100644 --- a/mava/networks/torsos.py +++ b/mava/networks/torsos.py @@ -27,6 +27,7 @@ class MLPTorso(nn.Module): layer_sizes: Sequence[int] activation: str = "relu" use_layer_norm: bool = False + activate_final: bool = True def setup(self) -> None: self.activation_fn = _parse_activation_fn(self.activation) @@ -35,11 +36,14 @@ def setup(self) -> None: def __call__(self, observation: chex.Array) -> chex.Array: """Forward pass.""" x = observation - for layer_size in self.layer_sizes: + for i, layer_size in enumerate(self.layer_sizes): x = nn.Dense(layer_size, kernel_init=orthogonal(np.sqrt(2)))(x) if self.use_layer_norm: x = nn.LayerNorm(use_scale=False)(x) - x = self.activation_fn(x) + + should_activate = (i < len(self.layer_sizes) - 1) or self.activate_final + x = self.activation_fn(x) if should_activate else x + return x @@ -59,7 +63,9 @@ def setup(self) -> None: def __call__(self, observation: chex.Array) -> chex.Array: """Forward pass.""" x = observation - for channel, kernel, stride in zip(self.channel_sizes, self.kernel_sizes, self.strides): + for channel, kernel, stride in zip( + self.channel_sizes, self.kernel_sizes, self.strides, strict=True + ): x = nn.Conv(channel, (kernel, kernel), (stride, stride))(x) if self.use_layer_norm: x = nn.LayerNorm(use_scale=False)(x) @@ -70,6 +76,29 @@ def __call__(self, observation: chex.Array) -> chex.Array: return 
jax.lax.collapse(x, -3) +class SwiGLU(nn.Module): + """SwiGLU module. + A gated variation of a standard feedforward layer using a Swish activation function. + For more details see: https://arxiv.org/abs/2002.05202 + """ + + hidden_dim: int + embed_dim: int + + def setup(self) -> None: + self.W_linear = self.param( + "W_linear", nn.initializers.zeros, (self.embed_dim, self.hidden_dim) + ) + self.W_gate = self.param("W_gate", nn.initializers.zeros, (self.embed_dim, self.hidden_dim)) + self.W_output = self.param( + "W_output", nn.initializers.zeros, (self.hidden_dim, self.embed_dim) + ) + + def __call__(self, x: chex.Array) -> chex.Array: + gated_output = jax.nn.swish(x @ self.W_gate) * (x @ self.W_linear) + return gated_output @ self.W_output + + def _parse_activation_fn(activation_fn_name: str) -> Callable[[chex.Array], chex.Array]: """Get the activation function.""" activation_fns: Dict[str, Callable[[chex.Array], chex.Array]] = { diff --git a/mava/version.py b/mava/networks/utils/__init__.py similarity index 96% rename from mava/version.py rename to mava/networks/utils/__init__.py index c6d613e4a..21db9ec1c 100644 --- a/mava/version.py +++ b/mava/networks/utils/__init__.py @@ -11,5 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -__version__ = "0.2.0" diff --git a/mava/networks/utils/mat/__init__.py b/mava/networks/utils/mat/__init__.py new file mode 100644 index 000000000..21db9ec1c --- /dev/null +++ b/mava/networks/utils/mat/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/mava/networks/utils/mat/decode.py b/mava/networks/utils/mat/decode.py new file mode 100644 index 000000000..c998b23be --- /dev/null +++ b/mava/networks/utils/mat/decode.py @@ -0,0 +1,161 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
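+
+# Decoding helpers for MAT. The `*_parallel_act` functions evaluate log-probabilities and entropy
+# for a batch of recorded actions in a single decoder pass (used during training), while the
+# `*_autoregressive_act` functions sample actions one agent at a time (used when acting), feeding
+# each sampled action back in as the next agent's shifted input.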
+ +from typing import Tuple, Union + +import chex +import jax +import jax.numpy as jnp +import tensorflow_probability.substrates.jax.distributions as tfd +from flax import linen as nn + +from mava.networks.distributions import IdentityTransformation, TanhTransformedDistribution + +# General shapes legend: +# B: batch size +# N: number of agents +# O: observation dimension +# A: action dimension +# E: model embedding dimension + + +def discrete_parallel_act( + decoder: nn.Module, + obs_rep: chex.Array, # (B, N, E) + action: chex.Array, # (B, N) + action_dim: int, # (, ) + legal_actions: chex.Array, # (B, N, A) + key: chex.PRNGKey, +) -> Tuple[chex.Array, chex.Array]: + B, N, _ = obs_rep.shape + one_hot_action = jax.nn.one_hot(action, action_dim) # (B, A) + shifted_action = jnp.zeros((B, N, action_dim + 1)) # (B, N, A +1) + shifted_action = shifted_action.at[:, 0, 0].set(1) + shifted_action = shifted_action.at[:, 1:, 1:].set(one_hot_action[:, :-1, :]) + logit = decoder(shifted_action, obs_rep) # (B, N, A) + + masked_logits = jnp.where( + legal_actions, + logit, + jnp.finfo(jnp.float32).min, + ) + + distribution = IdentityTransformation(distribution=tfd.Categorical(logits=masked_logits)) + action_log_prob = distribution.log_prob(action) + entropy = distribution.entropy(seed=key) + + return action_log_prob, entropy # (B, N), (B, N) + + +def continuous_parallel_act( + decoder: nn.Module, + obs_rep: chex.Array, # (B, N, E) + action: chex.Array, # (B, N, A) + action_dim: int, # (, ) + legal_actions: chex.Array, # (B, N, A) + key: chex.PRNGKey, +) -> Tuple[chex.Array, chex.Array]: + # We don't need legal_actions for continuous actions but keep it to keep the APIs consistent. + del legal_actions + B, N, _ = obs_rep.shape + shifted_action = jnp.zeros((B, N, action_dim)) + + shifted_action = shifted_action.at[:, 1:, :].set(action[:, :-1, :]) + + act_mean = decoder(shifted_action, obs_rep) # (B, N, A) + action_std = jax.nn.softplus(decoder.log_std) + + distribution = tfd.Normal(loc=act_mean, scale=action_std) + distribution = tfd.Independent( + TanhTransformedDistribution(distribution), + reinterpreted_batch_ndims=1, + ) + action_log_prob = distribution.log_prob(action) + entropy = distribution.entropy(seed=key) + + return action_log_prob, entropy # (B, N), (B, N) + + +def discrete_autoregressive_act( + decoder: nn.Module, + obs_rep: chex.Array, # (B, N, E) + action_dim: int, # (, ) + legal_actions: chex.Array, # (B, N, A) + key: chex.PRNGKey, +) -> Tuple[chex.Array, chex.Array]: + B, N, _ = obs_rep.shape + shifted_action = jnp.zeros((B, N, action_dim + 1)) + shifted_action = shifted_action.at[:, 0, 0].set(1) + output_action = jnp.zeros((B, N)) + output_action_log = jnp.zeros_like(output_action) + + for i in range(N): + logit = decoder(shifted_action, obs_rep)[:, i, :] # (B, A) + masked_logits = jnp.where( + legal_actions[:, i, :], + logit, + jnp.finfo(jnp.float32).min, + ) + key, sample_key = jax.random.split(key) + + distribution = IdentityTransformation(distribution=tfd.Categorical(logits=masked_logits)) + action = distribution.sample(seed=sample_key) # (B, ) + action_log = distribution.log_prob(action) # (B, ) + + output_action = output_action.at[:, i].set(action) + output_action_log = output_action_log.at[:, i].set(action_log) + + # Adds all except the last action to shifted_actions, as it is out of range + shifted_action = shifted_action.at[:, i + 1, 1:].set( + jax.nn.one_hot(action, action_dim), mode="drop" + ) + + return output_action.astype(jnp.int32), output_action_log # (B, N), (B, N) + + 
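+# The continuous variant below mirrors the discrete loop above, but it conditions on the raw
+# previous actions instead of one-hot encodings (so no start-of-sequence slot is needed) and
+# samples from a tanh-transformed Normal whose scale comes from softplus(decoder.log_std).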
+def continuous_autoregressive_act( + decoder: nn.Module, + obs_rep: chex.Array, # (B, N, E) + action_dim: int, # (, ) + legal_actions: Union[chex.Array, None], + key: chex.PRNGKey, +) -> Tuple[chex.Array, chex.Array]: + # We don't need legal_actions for continuous actions but keep it to keep the APIs consistent. + del legal_actions + B, N, _ = obs_rep.shape + shifted_action = jnp.zeros((B, N, action_dim)) + output_action = jnp.zeros((B, N, action_dim)) + output_action_log = jnp.zeros((B, N)) + + for i in range(N): + act_mean = decoder(shifted_action, obs_rep)[:, i, :] # (B, A) + action_std = jax.nn.softplus(decoder.log_std) + + key, sample_key = jax.random.split(key) + + distribution = tfd.Normal(loc=act_mean, scale=action_std) + distribution = tfd.Independent( + TanhTransformedDistribution(distribution), + reinterpreted_batch_ndims=1, + ) + action = distribution.sample(seed=sample_key) # (B, A) + action_log = distribution.log_prob(action) # (B,) + + output_action = output_action.at[:, i, :].set(action) + output_action_log = output_action_log.at[:, i].set(action_log) + + # Adds all except the last action to shifted_actions, as it is out of range + shifted_action = shifted_action.at[:, i + 1, :].set(action, mode="drop") + + return output_action, output_action_log # (B, N, A), (B, N) diff --git a/mava/networks/utils/sable/__init__.py b/mava/networks/utils/sable/__init__.py new file mode 100644 index 000000000..d26b9f645 --- /dev/null +++ b/mava/networks/utils/sable/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ruff: noqa: F401 + +from mava.networks.utils.sable.decode import ( + autoregressive_act, + train_decoder_fn, +) +from mava.networks.utils.sable.encode import ( + act_encoder_fn, + train_encoder_fn, +) +from mava.networks.utils.sable.get_init_hstates import get_init_hidden_state +from mava.networks.utils.sable.positional_encoding import PositionalEncoding diff --git a/mava/networks/utils/sable/decode.py b/mava/networks/utils/sable/decode.py new file mode 100644 index 000000000..c9befeb36 --- /dev/null +++ b/mava/networks/utils/sable/decode.py @@ -0,0 +1,145 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
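+
+# Sable decoding helpers. `train_decoder_fn` evaluates recorded actions chunk by chunk through the
+# chunkwise retention path, with `get_shifted_actions` inserting a start-of-timestep token for the
+# first agent of every timestep, while `autoregressive_act` samples actions agent by agent using
+# the decoder's recurrent retention path during rollouts.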
+ +from typing import Optional, Tuple + +import chex +import distrax +import jax +import jax.numpy as jnp +from flax import linen as nn + +# General shapes legend: +# B: batch size +# S: sequence length +# A: number of actions +# N: number of agents + + +def train_decoder_fn( + decoder: nn.Module, + obs_rep: chex.Array, + action: chex.Array, + legal_actions: chex.Array, + hstates: chex.Array, + dones: chex.Array, + step_count: chex.Array, + n_agents: int, + chunk_size: int, + rng_key: Optional[chex.PRNGKey] = None, +) -> Tuple[chex.Array, chex.Array]: + """Parallel action sampling for discrete action spaces.""" + # Delete `rng_key` since it is not used in discrete action space + del rng_key + + shifted_actions = get_shifted_actions(action, legal_actions, n_agents=n_agents) + logit = jnp.zeros_like(legal_actions, dtype=jnp.float32) + + # Apply the decoder per chunk + num_chunks = shifted_actions.shape[1] // chunk_size + for chunk_id in range(0, num_chunks): + start_idx = chunk_id * chunk_size + end_idx = (chunk_id + 1) * chunk_size + # Chunk obs_rep, shifted_actions, dones, and step_count + chunked_obs_rep = obs_rep[:, start_idx:end_idx] + chunk_shifted_actions = shifted_actions[:, start_idx:end_idx] + chunk_dones = dones[:, start_idx:end_idx] + chunk_step_count = step_count[:, start_idx:end_idx] + chunk_logit, hstates = decoder( + action=chunk_shifted_actions, + obs_rep=chunked_obs_rep, + hstates=hstates, + dones=chunk_dones, + step_count=chunk_step_count, + ) + logit = logit.at[:, start_idx:end_idx].set(chunk_logit) + + masked_logits = jnp.where( + legal_actions, + logit, + jnp.finfo(jnp.float32).min, + ) + + distribution = distrax.Categorical(logits=masked_logits) + action_log_prob = distribution.log_prob(action) + action_log_prob = jnp.expand_dims(action_log_prob, axis=-1) + entropy = jnp.expand_dims(distribution.entropy(), axis=-1) + + return action_log_prob, entropy + + +def get_shifted_actions(action: chex.Array, legal_actions: chex.Array, n_agents: int) -> chex.Array: + """Get the shifted action sequence for predicting the next action.""" + B, S, A = legal_actions.shape + + # Create a shifted action sequence for predicting the next action + shifted_actions = jnp.zeros((B, S, A + 1)) + + # Set the start-of-timestep token (first action as a "start" signal) + start_timestep_token = jnp.zeros(A + 1).at[0].set(1) + + # One hot encode the action + one_hot_action = jax.nn.one_hot(action, A) + + # Insert one-hot encoded actions into shifted array, shifting by 1 position + shifted_actions = shifted_actions.at[:, :, 1:].set(one_hot_action) + shifted_actions = jnp.roll(shifted_actions, shift=1, axis=1) + + # Set the start token for the first agent in each timestep + shifted_actions = shifted_actions.at[:, ::n_agents, :].set(start_timestep_token) + + return shifted_actions + + +def autoregressive_act( + decoder: nn.Module, + obs_rep: chex.Array, + hstates: chex.Array, + legal_actions: chex.Array, + step_count: chex.Array, + key: chex.PRNGKey, +) -> Tuple[chex.Array, chex.Array, chex.Array]: + B, N, A = legal_actions.shape + + shifted_actions = jnp.zeros((B, N, A + 1)) + shifted_actions = shifted_actions.at[:, 0, 0].set(1) + + output_action = jnp.zeros((B, N, 1)) + output_action_log = jnp.zeros_like(output_action) + + # Apply the decoder autoregressively + for i in range(N): + logit, hstates = decoder.recurrent( + action=shifted_actions[:, i : i + 1, :], + obs_rep=obs_rep[:, i : i + 1, :], + hstates=hstates, + step_count=step_count[:, i : i + 1], + ) + masked_logits = jnp.where( + 
legal_actions[:, i : i + 1, :], + logit, + jnp.finfo(jnp.float32).min, + ) + distribution = distrax.Categorical(logits=masked_logits) + key, sample_key = jax.random.split(key) + action, action_log = distribution.sample_and_log_prob(seed=sample_key) + output_action = output_action.at[:, i, :].set(action) + output_action_log = output_action_log.at[:, i, :].set(action_log) + + # Adds all except the last action to shifted_actions, as it is out of range. + shifted_actions = shifted_actions.at[:, i + 1, 1:].set( + jax.nn.one_hot(action[:, 0], A), mode="drop" + ) + + return output_action.astype(jnp.int32), output_action_log, hstates diff --git a/mava/networks/utils/sable/encode.py b/mava/networks/utils/sable/encode.py new file mode 100644 index 000000000..ba62cce69 --- /dev/null +++ b/mava/networks/utils/sable/encode.py @@ -0,0 +1,84 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +import chex +import jax.numpy as jnp +from flax import linen as nn + +# General shapes legend: +# B: batch size +# S: sequence length +# C: number of agents per chunk of sequence + + +def train_encoder_fn( + encoder: nn.Module, + obs: chex.Array, + hstate: chex.Array, + dones: chex.Array, + step_count: chex.Array, + chunk_size: int, +) -> Tuple[chex.Array, chex.Array, chex.Array]: + """Chunkwise encoding for discrete action spaces.""" + B, S = obs.shape[:2] + v_loc = jnp.zeros((B, S, 1)) + obs_rep = jnp.zeros((B, S, encoder.net_config.embed_dim)) + + # Apply the encoder per chunk + num_chunks = S // chunk_size + for chunk_id in range(0, num_chunks): + start_idx = chunk_id * chunk_size + end_idx = (chunk_id + 1) * chunk_size + # Chunk obs, dones, and step_count + chunk_obs = obs[:, start_idx:end_idx] + chunk_dones = dones[:, start_idx:end_idx] + chunk_step_count = step_count[:, start_idx:end_idx] + chunk_v_loc, chunk_obs_rep, hstate = encoder( + chunk_obs, hstate, chunk_dones, chunk_step_count + ) + v_loc = v_loc.at[:, start_idx:end_idx].set(chunk_v_loc) + obs_rep = obs_rep.at[:, start_idx:end_idx].set(chunk_obs_rep) + + return v_loc, obs_rep, hstate + + +def act_encoder_fn( + encoder: nn.Module, + obs: chex.Array, + decayed_hstate: chex.Array, + step_count: chex.Array, + chunk_size: int, +) -> Tuple[chex.Array, chex.Array, chex.Array]: + """Chunkwise encoding for ff-Sable and for discrete action spaces.""" + B, C = obs.shape[:2] + v_loc = jnp.zeros((B, C, 1)) + obs_rep = jnp.zeros((B, C, encoder.net_config.embed_dim)) + + # Apply the encoder per chunk + num_chunks = C // chunk_size + for chunk_id in range(0, num_chunks): + start_idx = chunk_id * chunk_size + end_idx = (chunk_id + 1) * chunk_size + # Chunk obs and step_count + chunk_obs = obs[:, start_idx:end_idx] + chunk_step_count = step_count[:, start_idx:end_idx] + chunk_v_loc, chunk_obs_rep, decayed_hstate = encoder.recurrent( + chunk_obs, decayed_hstate, chunk_step_count + ) + v_loc = v_loc.at[:, start_idx:end_idx].set(chunk_v_loc) + obs_rep = obs_rep.at[:, 
start_idx:end_idx].set(chunk_obs_rep) + + return v_loc, obs_rep, decayed_hstate diff --git a/mava/networks/utils/sable/get_init_hstates.py b/mava/networks/utils/sable/get_init_hstates.py new file mode 100644 index 000000000..6393e403e --- /dev/null +++ b/mava/networks/utils/sable/get_init_hstates.py @@ -0,0 +1,43 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import jax.numpy as jnp + +from mava.systems.sable.types import HiddenStates, SableNetworkConfig + + +def get_init_hidden_state(actor_net_config: SableNetworkConfig, batch_size: int) -> HiddenStates: + """Initializes the hidden states for the encoder and decoder.""" + # Compute the hidden state size based on embedding dimension and number of heads + hidden_size = actor_net_config.embed_dim // actor_net_config.n_head + + # Define the shape of the hidden states + hidden_state_shape = ( + batch_size, + actor_net_config.n_head, + actor_net_config.n_block, + hidden_size, + hidden_size, + ) + + # Initialize hidden states for encoder and decoder + dec_hs_self_retn = jnp.zeros(hidden_state_shape) + dec_hs_cross_retn = jnp.zeros(hidden_state_shape) + enc_hs = jnp.zeros(hidden_state_shape) + hidden_states = HiddenStates( + encoder=enc_hs, + decoder_self_retn=dec_hs_self_retn, + decoder_cross_retn=dec_hs_cross_retn, + ) + return hidden_states diff --git a/mava/networks/utils/sable/positional_encoding.py b/mava/networks/utils/sable/positional_encoding.py new file mode 100644 index 000000000..fadafaeac --- /dev/null +++ b/mava/networks/utils/sable/positional_encoding.py @@ -0,0 +1,60 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Tuple + +import chex +import jax +import jax.numpy as jnp +from flax import linen as nn + + +class PositionalEncoding(nn.Module): + """Positional Encoding for Sable. 
Encodes position information into sequences""" + + d_model: int + + def setup(self) -> None: + # Set maximum sequence length for positional encoding + self.max_size = 10_000 + # Precompute the scaling factor for even indices (used in sine and cosine functions) + self.div_term = jnp.exp( + jnp.arange(0, self.d_model, 2) * (-jnp.log(10000.0) / self.d_model) + )[jnp.newaxis] + + def __call__( + self, key: chex.Array, query: chex.Array, value: chex.Array, position: chex.Array + ) -> Tuple[chex.Array, chex.Array, chex.Array]: + """Computes positional encoding for a given sequence of positions.""" + pe = jax.vmap(self._get_pos_encoding)(position) + + # Add positional encoding to the input tensors + key += pe + query += pe + value += pe + + return key, query, value + + def _get_pos_encoding(self, position: chex.Array) -> chex.Array: + """Computes positional encoding for a given the index of the token.""" + seq_len = position.shape[0] + + # Calculate positional encoding using sine for even indices and cosine for odd indices. + x = position[:, jnp.newaxis] * self.div_term + pe = jnp.zeros((seq_len, self.d_model)) + pe = pe.at[:, 0::2].set(jnp.sin(x)) + pe = pe.at[:, 1::2].set(jnp.cos(x)) + + return pe diff --git a/mava/systems/mat/anakin/mat.py b/mava/systems/mat/anakin/mat.py new file mode 100644 index 000000000..944ab77d1 --- /dev/null +++ b/mava/systems/mat/anakin/mat.py @@ -0,0 +1,598 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import time +from functools import partial +from typing import Any, Dict, Tuple + +import chex +import flax +import hydra +import jax +import jax.numpy as jnp +import optax +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict +from jax import tree +from omegaconf import DictConfig, OmegaConf +from rich.pretty import pprint + +from mava.evaluator import ActorState, get_eval_fn +from mava.networks.mat_network import MultiAgentTransformer +from mava.systems.mat.types import ActorApply, LearnerApply, LearnerState +from mava.systems.ppo.types import PPOTransition +from mava.types import ( + ExperimentOutput, + LearnerFn, + MarlEnv, + TimeStep, +) +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + merge_leading_dims, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +def get_learner_fn( + env: MarlEnv, + apply_fns: Tuple[ActorApply, LearnerApply], + update_fn: optax.TransformUpdateFn, + config: DictConfig, +) -> LearnerFn[LearnerState]: + """Get the learner function.""" + + # Get apply and update functions for actor and critic networks. 
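Stepping back to the PositionalEncoding module defined above: the sinusoidal table produced by _get_pos_encoding reduces to a few lines of jnp and can be sanity-checked in isolation. A minimal sketch with toy sizes (this just mirrors the math, it is not part of the module's API):

import jax.numpy as jnp

d_model = 8
position = jnp.arange(4)  # token indices 0..3

div_term = jnp.exp(jnp.arange(0, d_model, 2) * (-jnp.log(10000.0) / d_model))
x = position[:, jnp.newaxis] * div_term          # (seq_len, d_model // 2)
pe = jnp.zeros((position.shape[0], d_model))
pe = pe.at[:, 0::2].set(jnp.sin(x))              # even channels: sine
pe = pe.at[:, 1::2].set(jnp.cos(x))              # odd channels: cosine

# pe[0] is alternating 0/1 (sin(0), cos(0)); later rows rotate at geometrically
# spaced frequencies, which is what lets the network distinguish positions when
# pe is added to the keys, queries and values.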
+ actor_action_select_fn, actor_apply_fn = apply_fns + actor_update_fn = update_fn + + def _update_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, Tuple]: + """A single update of the network. + + This function steps the environment and records the trajectory batch for + training. It then calculates advantages and targets based on the recorded + trajectory and updates the actor and critic networks based on the calculated + losses. + + Args: + learner_state (NamedTuple): + - params: The current model parameters. + - opt_state: The current optimizer states. + - key: The random number generator state. + - env_state: The environment state. + - last_timestep: The last timestep in the current trajectory. + _ (Any): The current metrics info. + """ + + def _env_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, PPOTransition]: + """Step the environment.""" + params, opt_state, key, env_state, last_timestep = learner_state + + # SELECT ACTION + key, policy_key = jax.random.split(key) + action, log_prob, value = actor_action_select_fn( # type: ignore + params, + last_timestep.observation, + policy_key, + ) + # STEP ENVIRONMENT + env_state, timestep = jax.vmap(env.step, in_axes=(0, 0))(env_state, action) + + # LOG EPISODE METRICS + # Repeat along the agent dimension. This is needed to handle the + # shuffling along the agent dimension during training. + info = tree.map( + lambda x: jnp.repeat(x[..., jnp.newaxis], config.system.num_agents, axis=-1), + timestep.extras["episode_metrics"], + ) + + # SET TRANSITION + done = tree.map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + timestep.last(), + ) + transition = PPOTransition( + done, + action, + value, + timestep.reward, + log_prob, + last_timestep.observation, + info, + ) + learner_state = LearnerState(params, opt_state, key, env_state, timestep) + return learner_state, transition + + # STEP ENVIRONMENT FOR ROLLOUT LENGTH + learner_state, traj_batch = jax.lax.scan( + _env_step, learner_state, None, config.system.rollout_length + ) + + # CALCULATE ADVANTAGE + params, opt_state, key, env_state, last_timestep = learner_state + + key, last_val_key = jax.random.split(key) + _, _, last_val = actor_action_select_fn( # type: ignore + params, + last_timestep.observation, + last_val_key, + ) + + def _calculate_gae( + traj_batch: PPOTransition, last_val: chex.Array + ) -> Tuple[chex.Array, chex.Array]: + """Calculate the GAE.""" + + def _get_advantages(gae_and_next_value: Tuple, transition: PPOTransition) -> Tuple: + """Calculate the GAE for a single transition.""" + gae, next_value = gae_and_next_value + done, value, reward = ( + transition.done, + transition.value, + transition.reward, + ) + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - done) * gae + return (gae, value), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(last_val), last_val), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + advantages, targets = _calculate_gae(traj_batch, last_val) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_state, key = train_state + traj_batch, advantages, targets = batch_info + + def 
_loss_fn( + params: FrozenDict, + traj_batch: PPOTransition, + gae: chex.Array, + value_targets: chex.Array, + entropy_key: chex.PRNGKey, + ) -> Tuple: + """Calculate the actor loss.""" + # RERUN NETWORK + + log_prob, value, entropy = actor_apply_fn( # type: ignore + params, + traj_batch.obs, + traj_batch.action, + entropy_key, + ) + + # CALCULATE ACTOR LOSS + ratio = jnp.exp(log_prob - traj_batch.log_prob) + + # Nomalise advantage at minibatch level + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + loss_actor = loss_actor.mean() + entropy = entropy.mean() + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + -config.system.clip_eps, config.system.clip_eps + ) + + # MSE LOSS + value_losses = jnp.square(value - value_targets) + value_losses_clipped = jnp.square(value_pred_clipped - value_targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + total_loss = ( + loss_actor + - config.system.ent_coef * entropy + + config.system.vf_coef * value_loss + ) + return total_loss, (loss_actor, entropy, value_loss) + + # CALCULATE ACTOR LOSS + key, entropy_key = jax.random.split(key) + actor_grad_fn = jax.value_and_grad(_loss_fn, has_aux=True) + actor_loss_info, actor_grads = actor_grad_fn( + params, + traj_batch, + advantages, + targets, + entropy_key, + ) + + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="batch" + ) + # pmean over devices. + actor_grads, actor_loss_info = jax.lax.pmean( + (actor_grads, actor_loss_info), axis_name="device" + ) + + # UPDATE ACTOR PARAMS AND OPTIMISER STATE + actor_updates, new_opt_state = actor_update_fn(actor_grads, opt_state) + new_params = optax.apply_updates(params, actor_updates) + + # PACK LOSS INFO + total_loss = actor_loss_info[0] + value_loss = actor_loss_info[1][2] + actor_loss = actor_loss_info[1][0] + entropy = actor_loss_info[1][1] + loss_info = { + "total_loss": total_loss, + "value_loss": value_loss, + "actor_loss": actor_loss, + "entropy": entropy, + } + + return (new_params, new_opt_state, key), loss_info + + params, opt_state, traj_batch, advantages, targets, key = update_state + key, batch_shuffle_key, agent_shuffle_key, entropy_key = jax.random.split(key, 4) + + # SHUFFLE MINIBATCHES + batch_size = config.system.rollout_length * config.arch.num_envs + permutation = jax.random.permutation(batch_shuffle_key, batch_size) + + batch = (traj_batch, advantages, targets) + batch = tree.map(lambda x: merge_leading_dims(x, 2), batch) + shuffled_batch = tree.map(lambda x: jnp.take(x, permutation, axis=0), batch) + + # Shuffle along the agent dimension as well + permutation = jax.random.permutation(agent_shuffle_key, config.system.num_agents) + shuffled_batch = tree.map(lambda x: jnp.take(x, permutation, axis=1), shuffled_batch) + + minibatches = tree.map( + lambda x: jnp.reshape(x, (config.system.num_minibatches, -1, *x.shape[1:])), + shuffled_batch, + ) + + # UPDATE MINIBATCHES + (params, opt_state, entropy_key), loss_info = jax.lax.scan( + _update_minibatch, (params, opt_state, entropy_key), minibatches + ) + + update_state = params, opt_state, traj_batch, advantages, targets, key + return update_state, loss_info + + update_state = params, opt_state, traj_batch, advantages, targets, key + + # UPDATE EPOCHS + update_state, loss_info = 
jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_state, traj_batch, advantages, targets, key = update_state + learner_state = LearnerState(params, opt_state, key, env_state, last_timestep) + + metric = traj_batch.info + + return learner_state, (metric, loss_info) + + def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: + """Learner function. + + This function represents the learner, it updates the network parameters + by iteratively applying the `_update_step` function for a fixed number of + updates. The `_update_step` function is vectorized over a batch of inputs. + + Args: + learner_state (NamedTuple): + - params: The initial model parameters. + - opt_state: The initial optimiser state. + - key: The random number generator state. + - env_state: The environment state. + - timesteps: The initial timestep in the initial trajectory. + """ + + batched_update_step = jax.vmap(_update_step, in_axes=(0, None), axis_name="batch") + + learner_state, (episode_info, loss_info) = jax.lax.scan( + batched_update_step, learner_state, None, config.system.num_updates_per_eval + ) + return ExperimentOutput( + learner_state=learner_state, + episode_metrics=episode_info, + train_metrics=loss_info, + ) + + return learner_fn + + +def learner_setup( + env: MarlEnv, keys: chex.Array, config: DictConfig +) -> Tuple[LearnerFn[LearnerState], Any, LearnerState]: + """Initialise learner_fn, network, optimiser, environment and states.""" + # Get available TPU cores. + n_devices = len(jax.devices()) + + # Get number of agents. + config.system.num_agents = env.num_agents + + # PRNG keys. + key, actor_net_key = keys + + # Initialise observation: Obs for all agents. + init_x = env.observation_spec().generate_value() + init_x = tree.map(lambda x: x[None, ...], init_x) + + _, action_space_type = get_action_head(env) + + if action_space_type == "discrete": + init_action = jnp.zeros((1, config.system.num_agents), dtype=jnp.int32) + elif action_space_type == "continuous": + init_action = jnp.zeros((1, config.system.num_agents, env.action_dim), dtype=jnp.float32) + else: + raise ValueError("Invalid action space type") + + # Define network and optimiser. + actor_network = MultiAgentTransformer( + action_dim=env.action_dim, + n_agent=config.system.num_agents, + net_config=config.network, + action_space_type=action_space_type, + ) + + actor_lr = make_learning_rate(config.system.actor_lr, config) + actor_optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(actor_lr, eps=1e-5), + ) + + # Initialise actor params and optimiser state. + # `PRNGKey(0)` is just a dummy key we pass through the network since it needs a key for + # computing the network entropy at train time. + params = actor_network.init(actor_net_key, init_x, init_action, jax.random.PRNGKey(0)) + opt_state = actor_optim.init(params) + + # Pack apply and update functions. + apply_fns = ( + partial(actor_network.apply, method="get_actions"), + actor_network.apply, + ) + # Get batched iterated update and replicate it to pmap it over cores. + learn = get_learner_fn(env, apply_fns, actor_optim.update, config) + learn = jax.pmap(learn, axis_name="device") + + # Initialise environment states and timesteps: across devices and batches. 
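The reshape right below folds a flat batch of n_devices * update_batch_size * num_envs environments into the (devices, update batch, envs, ...) layout that the pmap/vmap stack expects. A toy sketch of that bookkeeping, with illustrative sizes:

import jax.numpy as jnp

n_devices, update_batch_size, num_envs = 2, 2, 4
flat = jnp.arange(n_devices * update_batch_size * num_envs * 3).reshape(-1, 3)  # (16, 3) fake states

reshaped = flat.reshape((n_devices, update_batch_size, num_envs) + flat.shape[1:])
assert reshaped.shape == (2, 2, 4, 3)
# Axis 0 is consumed by jax.pmap (one slice per device), axis 1 by the
# update-batch vmap, and axis 2 by the per-environment vmap inside env.step.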
+ key, *env_keys = jax.random.split( + key, n_devices * config.system.update_batch_size * config.arch.num_envs + 1 + ) + env_states, timesteps = jax.vmap(env.reset, in_axes=(0))( + jnp.stack(env_keys), + ) + reshape_states = lambda x: x.reshape( + (n_devices, config.system.update_batch_size, config.arch.num_envs) + x.shape[1:] + ) + # (devices, update batch size, num_envs, ...) + env_states = tree.map(reshape_states, env_states) + timesteps = tree.map(reshape_states, timesteps) + + # Load model from checkpoint if specified. + if config.logger.checkpointing.load_model: + loaded_checkpoint = Checkpointer( + model_name=config.logger.system_name, + **config.logger.checkpointing.load_args, # Other checkpoint args + ) + # Restore the learner state from the checkpoint + restored_params, _ = loaded_checkpoint.restore_params(input_params=params) + # Update the params + params = restored_params + + # Define params to be replicated across devices and batches. + key, step_keys = jax.random.split(key) + replicate_learner = (params, opt_state, step_keys) + + # Duplicate learner for update_batch_size. + broadcast = lambda x: jnp.broadcast_to(x, (config.system.update_batch_size, *x.shape)) + replicate_learner = tree.map(broadcast, replicate_learner) + + # Duplicate learner across devices. + replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=jax.devices()) + # Initialise learner state. + params, opt_state, step_keys = replicate_learner + init_learner_state = LearnerState(params, opt_state, step_keys, env_states, timesteps) + + return learn, actor_network, init_learner_state + + +def run_experiment(_config: DictConfig) -> float: + """Runs experiment.""" + config = copy.deepcopy(_config) + + n_devices = len(jax.devices()) + + # Create the enviroments for train and eval. + env, eval_env = environments.make(config) + + # PRNG keys. + key, key_e, actor_net_key = jax.random.split(jax.random.PRNGKey(config.system.seed), num=3) + + # Setup learner. + learn, actor_network, learner_state = learner_setup(env, (key, actor_net_key), config) + + eval_keys = jax.random.split(key_e, n_devices) + + def eval_act_fn( + params: FrozenDict, + timestep: TimeStep, + key: chex.PRNGKey, + actor_state: ActorState, + ) -> Tuple[chex.Array, ActorState]: + """The acting function that get's passed to the evaluator. + Given that the MAT network has a `get_actions` method we define this eval_act_fn + accordingly. + """ + + del actor_state # Unused since the system doesn't have memory over time. + output_action, _, _ = actor_network.apply( # type: ignore + params, + timestep.observation, + key, + method="get_actions", + ) + return output_action, {} + + evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) + + # Calculate total timesteps. + config = check_total_timesteps(config) + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." + + assert ( + config.arch.num_envs % config.system.num_minibatches == 0 + ), "Number of envs must be divisibile by number of minibatches." + + # Calculate number of updates per evaluation. 
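The steps_per_rollout computed just below is the number of environment steps consumed between two evaluations; a quick worked example with assumed values (not Mava defaults) makes the bookkeeping concrete:

# Assumed, illustrative values:
n_devices = 1
num_updates = 1000
num_evaluation = 100
rollout_length = 128
update_batch_size = 2
num_envs = 16

num_updates_per_eval = num_updates // num_evaluation  # 10
steps_per_rollout = (
    n_devices * num_updates_per_eval * rollout_length * update_batch_size * num_envs
)
print(steps_per_rollout)  # 1 * 10 * 128 * 2 * 16 = 40_960 env steps per evaluation window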
+ config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + steps_per_rollout = ( + n_devices + * config.system.num_updates_per_eval + * config.system.rollout_length + * config.system.update_batch_size + * config.arch.num_envs + ) + + # Logger setup + logger = MavaLogger(config) + cfg: Dict = OmegaConf.to_container(config, resolve=True) + cfg["arch"]["devices"] = jax.devices() + pprint(cfg) + + # Set up checkpointer + save_checkpoint = config.logger.checkpointing.save_model + if save_checkpoint: + checkpointer = Checkpointer( + metadata=config, # Save all config as metadata in the checkpoint + model_name=config.logger.system_name, + **config.logger.checkpointing.save_args, # Checkpoint args + ) + + # Run experiment for a total number of evaluations. + max_episode_return = -jnp.inf + best_params = None + for eval_step in range(config.arch.num_evaluation): + # Train. + start_time = time.time() + + learner_output = learn(learner_state) + jax.block_until_ready(learner_output) + + # Log the results of the training. + elapsed_time = time.time() - start_time + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + + # Separately log timesteps, actoring metrics and training metrics. + logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) + if ep_completed: # only log episode metrics if an episode was completed in the rollout. + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + + trained_params = unreplicate_batch_dim(learner_state.params) + key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) + eval_keys = jnp.stack(eval_keys) + eval_keys = eval_keys.reshape(n_devices, -1) + + # Evaluate. + eval_metrics = evaluator(trained_params, eval_keys, {}) + jax.block_until_ready(eval_metrics) + logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) + episode_return = jnp.mean(eval_metrics["episode_return"]) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(trained_params) + max_episode_return = episode_return + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) + + # Measure absolute metric. + if config.arch.absolute_metric: + abs_metric_evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=True) + eval_keys = jax.random.split(key, n_devices) + + eval_metrics = abs_metric_evaluator(best_params, eval_keys, {}) + jax.block_until_ready(eval_metrics) + + t = int(steps_per_rollout * (eval_step + 1)) + logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + + +@hydra.main( + config_path="../../../configs/default", + config_name="mat.yaml", + version_base="1.2", +) +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. 
+ OmegaConf.set_struct(cfg, False) + cfg.logger.system_name = "mat" + + eval_performance = run_experiment(cfg) + jax.block_until_ready(eval_performance) + print(f"{Fore.CYAN}{Style.BRIGHT}MAT experiment completed{Style.RESET_ALL}") + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() diff --git a/mava/systems/mat/types.py b/mava/systems/mat/types.py new file mode 100644 index 000000000..a8875bb5c --- /dev/null +++ b/mava/systems/mat/types.py @@ -0,0 +1,51 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Tuple + +import chex +from chex import Array, PRNGKey +from flax.core.frozen_dict import FrozenDict +from jumanji.types import TimeStep +from optax._src.base import OptState +from typing_extensions import NamedTuple + +from mava.types import MavaObservation, State + + +class LearnerState(NamedTuple): + """State of the learner.""" + + params: FrozenDict + opt_state: OptState + key: chex.PRNGKey + env_state: State + timestep: TimeStep + + +class MATNetworkConfig(NamedTuple): + """Configuration for the MAT network.""" + + n_block: int + n_head: int + embed_dim: int + use_swiglu: bool + use_rmsnorm: bool + + +ActorApply = Callable[ + [FrozenDict, MavaObservation, PRNGKey], + Tuple[Array, Array, Array, Array], +] +LearnerApply = Callable[[FrozenDict, MavaObservation, Array, PRNGKey], Tuple[Array, Array, Array]] diff --git a/mava/systems/ppo/anakin/ff_ippo.py b/mava/systems/ppo/anakin/ff_ippo.py index da3ff1ebd..698c505b2 100644 --- a/mava/systems/ppo/anakin/ff_ippo.py +++ b/mava/systems/ppo/anakin/ff_ippo.py @@ -43,6 +43,7 @@ unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -361,9 +362,8 @@ def learner_setup( # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=env.action_dim - ) + action_head, _ = get_action_head(env) + actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) actor_network = Actor(torso=actor_torso, action_head=actor_action_head) @@ -479,6 +479,10 @@ def run_experiment(_config: DictConfig) -> float: config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." + assert ( + config.arch.num_envs % config.system.num_minibatches == 0 + ), "Number of envs must be divisibile by number of minibatches." + # Calculate number of updates per evaluation. 
config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation steps_per_rollout = ( diff --git a/mava/systems/ppo/anakin/ff_mappo.py b/mava/systems/ppo/anakin/ff_mappo.py index 1e335e7f9..3103cc164 100644 --- a/mava/systems/ppo/anakin/ff_mappo.py +++ b/mava/systems/ppo/anakin/ff_mappo.py @@ -38,6 +38,7 @@ from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import merge_leading_dims, unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -345,9 +346,8 @@ def learner_setup( # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=env.action_dim - ) + action_head, _ = get_action_head(env) + actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) actor_network = Actor(torso=actor_torso, action_head=actor_action_head) @@ -463,6 +463,10 @@ def run_experiment(_config: DictConfig) -> float: config.system.num_updates > config.arch.num_evaluation ), "Number of updates per evaluation must be less than total number of updates." + assert ( + config.arch.num_envs % config.system.num_minibatches == 0 + ), "Number of envs must be divisibile by number of minibatches." + # Calculate number of updates per evaluation. config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation steps_per_rollout = ( diff --git a/mava/systems/ppo/anakin/rec_ippo.py b/mava/systems/ppo/anakin/rec_ippo.py index f648e12ea..b936262ff 100644 --- a/mava/systems/ppo/anakin/rec_ippo.py +++ b/mava/systems/ppo/anakin/rec_ippo.py @@ -52,6 +52,7 @@ from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -456,9 +457,8 @@ def learner_setup( # Define network and optimisers. actor_pre_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) actor_post_torso = hydra.utils.instantiate(config.network.actor_network.post_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=env.action_dim - ) + action_head, _ = get_action_head(env) + actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_pre_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) critic_post_torso = hydra.utils.instantiate(config.network.critic_network.post_torso) @@ -594,6 +594,10 @@ def run_experiment(_config: DictConfig) -> float: config.system.rollout_length % config.system.recurrent_chunk_size == 0 ), "Rollout length must be divisible by recurrent chunk size." + assert ( + config.arch.num_envs % config.system.num_minibatches == 0 + ), "Number of envs must be divisibile by number of minibatches." + # Create the enviroments for train and eval. 
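The divisibility assertion added just above (and mirrored across the other PPO systems) guards the minibatch reshape: the shuffled batch of rollout_length * num_envs samples is later reshaped into (num_minibatches, -1, ...), which requires an even split. A small sketch of why the invariant is presumably pinned to num_envs rather than the full batch size:

rollout_length, num_minibatches = 8, 4

# num_envs divisible by num_minibatches -> the flattened batch always splits
# evenly, whatever rollout_length is:
num_envs = 16
assert (rollout_length * num_envs) % num_minibatches == 0  # 128 -> 32 samples per minibatch

# With num_envs = 6 the split only works for some rollout lengths (48 is fine,
# but rollout_length = 5 would give 30 samples, which 4 does not divide), so
# checking num_envs alone keeps the guarantee independent of rollout_length.
assert (5 * 6) % num_minibatches != 0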
env, eval_env = environments.make(config) diff --git a/mava/systems/ppo/anakin/rec_mappo.py b/mava/systems/ppo/anakin/rec_mappo.py index cd422a566..f1105fe73 100644 --- a/mava/systems/ppo/anakin/rec_mappo.py +++ b/mava/systems/ppo/anakin/rec_mappo.py @@ -52,6 +52,7 @@ from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -451,9 +452,8 @@ def learner_setup( # Define network and optimiser. actor_pre_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) actor_post_torso = hydra.utils.instantiate(config.network.actor_network.post_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=env.action_dim - ) + action_head, _ = get_action_head(env) + actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_pre_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) critic_post_torso = hydra.utils.instantiate(config.network.critic_network.post_torso) @@ -590,6 +590,10 @@ def run_experiment(_config: DictConfig) -> float: config.system.rollout_length % config.system.recurrent_chunk_size == 0 ), "Rollout length must be divisible by recurrent chunk size." + assert ( + config.arch.num_envs % config.system.num_minibatches == 0 + ), "Number of envs must be divisibile by number of minibatches." + # Create the enviroments for train and eval. env, eval_env = environments.make(config=config, add_global_state=True) diff --git a/mava/systems/q_learning/anakin/rec_iql.py b/mava/systems/q_learning/anakin/rec_iql.py index 5a1d7df34..a5a876ccd 100644 --- a/mava/systems/q_learning/anakin/rec_iql.py +++ b/mava/systems/q_learning/anakin/rec_iql.py @@ -29,7 +29,6 @@ from flax.core.scope import FrozenVariableDict from flax.linen import FrozenDict from jax import Array, tree -from jumanji.env import Environment from jumanji.types import TimeStep from omegaconf import DictConfig, OmegaConf from rich.pretty import pprint @@ -45,7 +44,7 @@ TrainState, Transition, ) -from mava.types import Observation +from mava.types import MarlEnv, Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer from mava.utils.config import check_total_timesteps @@ -61,7 +60,7 @@ def init( cfg: DictConfig, ) -> Tuple[ - Tuple[Environment, Environment], + Tuple[MarlEnv, MarlEnv], RecQNetwork, optax.GradientTransformation, TrajectoryBuffer, @@ -69,24 +68,7 @@ def init( MavaLogger, PRNGKey, ]: - """Initialize system by creating the envs, networks etc. - - Args: - ---- - cfg: System configuration. - - Returns: - ------- - Tuple containing: - Tuple[Environment, Environment]: The environment and evaluation environment. - RecQNetwork: Recurrent Q network. - optax.GradientTransformation: Optimiser for RecQNetwork. - TrajectoryBuffer: The replay buffer. - LearnerState: The initial learner state. - MavaLogger: The logger. - PRNGKey: The random key. 
- - """ + """Initialize system by creating the envs, networks etc.""" logger = MavaLogger(cfg) key = jax.random.PRNGKey(cfg.system.seed) @@ -103,17 +85,19 @@ def replicate(x: Any) -> Any: num_agents = env.num_agents key, q_key = jax.random.split(key, 2) + # Shape legend: - # T: Time (dummy dimension size = 1) - # B: Batch (dummy dimension size = 1) - # A: Agent - # Make dummy inputs to init recurrent Q network -> need shape (T, B, A, ...) - init_obs = env.observation_spec().generate_value() # (A, ...) - # (B, T, A, ...) + # T: Time + # B: Batch + # N: Agent + + # Make dummy inputs to init recurrent Q network -> need shape (T, B, N, ...) + init_obs = env.observation_spec().generate_value() # (N, ...) + # (B, T, N, ...) init_obs_batched = tree.map(lambda x: x[jnp.newaxis, jnp.newaxis, ...], init_obs) init_term_or_trunc = jnp.zeros((1, 1, 1), dtype=bool) # (T, B, 1) init_x = (init_obs_batched, init_term_or_trunc) # pack the RNN dummy inputs - # (B, A, ...) + # (B, N, ...) init_hidden_state = ScannedRNN.initialize_carry( (cfg.arch.num_envs, num_agents), cfg.network.hidden_state_dim ) @@ -146,9 +130,9 @@ def replicate(x: Any) -> Any: init_hidden_state = replicate(init_hidden_state) # Create dummy transition - init_acts = env.action_spec().generate_value() # (A,) + init_acts = env.action_spec().generate_value() # (N,) init_transition = Transition( - obs=init_obs, # (A, ...) + obs=init_obs, # (N, ...) action=init_acts, reward=jnp.zeros((num_agents,), dtype=float), terminal=jnp.zeros((1,), dtype=bool), # one flag for all agents @@ -159,7 +143,7 @@ def replicate(x: Any) -> Any: # Initialise trajectory buffer rb = fbx.make_trajectory_buffer( # n transitions gives n-1 full data points - sample_sequence_length=cfg.system.sample_sequence_length + 1, + sample_sequence_length=cfg.system.sample_sequence_length, period=1, # sample any unique trajectory add_batch_size=cfg.arch.num_envs, sample_batch_size=cfg.system.sample_batch_size, @@ -216,45 +200,18 @@ def replicate(x: Any) -> Any: def make_update_fns( cfg: DictConfig, - env: Environment, + env: MarlEnv, q_net: RecQNetwork, opt: optax.GradientTransformation, rb: TrajectoryBuffer, -) -> Callable[[LearnerState], Tuple[LearnerState, Tuple[Metrics, Metrics]]]: - """Create the update function for the Q-learner. - - Args: - ---- - cfg: System configuration. - env: Learning environment. - q_net: Recurrent q network. - opt: Optimiser for the recurrent Q network. - rb: The replay buffer. - - Returns: - ------- - The update function. - - """ +) -> Callable[[LearnerState[QNetParams]], Tuple[LearnerState[QNetParams], Tuple[Metrics, Metrics]]]: + """Create the update function for the Q-learner.""" # ---- Acting functions ---- def select_eps_greedy_action( action_selection_state: ActionSelectionState, obs: Observation, term_or_trunc: Array ) -> Tuple[ActionSelectionState, Array]: - """Select action to take in epsilon-greedy way. Batch and agent dims are included. - - Args: - ---- - action_selection_state: Tuple of online parameters, previous hidden state, - environment timestep (used to calculate epsilon) and a random key. - obs: The observation from the previous timestep. - term_or_trunc: The flag timestep.last() from the previous timestep. - - Returns: - ------- - A tuple of the updated action selection state and the chosen action. - - """ + """Select action to take in epsilon-greedy way. 
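The epsilon schedule computed right below decays linearly from 1.0 to eps_min over eps_decay environment steps and then stays flat. A small sketch with assumed hyperparameters (not Mava defaults):

import jax.numpy as jnp

eps_min, eps_decay = 0.05, 100_000  # assumed values

def epsilon(t: int) -> jnp.ndarray:
    # Linear decay from 1.0 at t = 0 to eps_min at t = eps_decay, clipped below.
    return jnp.maximum(eps_min, 1 - (t / eps_decay) * (1 - eps_min))

print(epsilon(0))        # 1.0
print(epsilon(50_000))   # 0.525
print(epsilon(200_000))  # 0.05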
Batch and agent dims are included.""" params, hidden_state, t, key = action_selection_state eps = jnp.maximum( @@ -271,7 +228,7 @@ def select_eps_greedy_action( new_key, explore_key = jax.random.split(key, 2) action = eps_greedy_dist.sample(seed=explore_key) - action = action[0, ...] # (1, B, A) -> (B, A) + action = action[0, ...] # (1, B, N) -> (B, N) next_action_selection_state = ActionSelectionState( params, next_hidden_state, t + cfg.arch.num_envs, new_key @@ -371,24 +328,24 @@ def q_loss_fn( return q_loss, loss_info def update_q( - params: QNetParams, opt_states: optax.OptState, data: Transition, t_train: int + params: QNetParams, opt_states: optax.OptState, data_full: Transition, t_train: int ) -> Tuple[QNetParams, optax.OptState, Metrics]: """Update the Q parameters.""" # Get data aligned with current/next timestep - data_first = tree.map(lambda x: x[:, :-1, ...], data) - data_next = tree.map(lambda x: x[:, 1:, ...], data) + data = tree.map(lambda x: x[:, :-1, ...], data_full) + data_next = tree.map(lambda x: x[:, 1:, ...], data_full) - obs = data_first.obs - term_or_trunc = data_first.term_or_trunc - reward = data_first.reward - action = data_first.action + obs = data.obs + term_or_trunc = data.term_or_trunc + reward = data.reward + action = data.action # The three following variables all come from the same time step. # They are stored and accessed in this way because of the `AutoResetWrapper`. - # At the end of an episode `data_first.next_obs` and `data_next.obs` will be - # different, which is why we need to store both. Thus `data_first.next_obs` + # At the end of an episode `data.next_obs` and `data_next.obs` will be + # different, which is why we need to store both. Thus `data.next_obs` # aligns with the `terminal` from `data_next`. - next_obs = data_first.next_obs + next_obs = data.next_obs next_term_or_trunc = data_next.term_or_trunc next_terminal = data_next.terminal @@ -443,7 +400,9 @@ def update_q( return next_params, next_opt_state, q_loss_info - def train(train_state: TrainState, _: Any) -> Tuple[TrainState, Metrics]: + def train( + train_state: TrainState[QNetParams], _: Any + ) -> Tuple[TrainState[QNetParams], Metrics]: """Sample, train and repack.""" # unpack and get keys buffer_state, params, opt_states, t_train, key = train_state @@ -468,8 +427,8 @@ def train(train_state: TrainState, _: Any) -> Tuple[TrainState, Metrics]: scanned_train = lambda state: lax.scan(train, state, None, length=cfg.system.epochs) def update_step( - learner_state: LearnerState, _: Any - ) -> Tuple[LearnerState, Tuple[Metrics, Metrics]]: + learner_state: LearnerState[QNetParams], _: Any + ) -> Tuple[LearnerState[QNetParams], Tuple[Metrics, Metrics]]: """Interact, then learn.""" # unpack and get random keys ( @@ -527,7 +486,7 @@ def update_step( donate_argnums=0, ) - return pmaped_update_step # type:ignore + return pmaped_update_step def run_experiment(cfg: DictConfig) -> float: @@ -565,8 +524,7 @@ def eval_act_fn( term_or_trunc = timestep.last() net_input = (timestep.observation, term_or_trunc[..., jnp.newaxis]) net_input = tree.map(lambda x: x[jnp.newaxis], net_input) # add batch dim to obs - - next_hidden_state, eps_greedy_dist = q_net.apply(params, hidden_state, net_input, 0.0) + next_hidden_state, eps_greedy_dist = q_net.apply(params, hidden_state, net_input) action = eps_greedy_dist.sample(seed=key).squeeze(0) return action, {"hidden_state": next_hidden_state} @@ -587,6 +545,7 @@ def eval_act_fn( ) max_episode_return = -jnp.inf + best_params = 
copy.deepcopy(unreplicate_batch_dim(learner_state.params.online)) # Main loop: for eval_idx, t in enumerate( @@ -619,6 +578,7 @@ def eval_act_fn( eval_keys = jax.random.split(eval_key, cfg.arch.n_devices) eval_params = unreplicate_batch_dim(learner_state.params.online) eval_metrics = evaluator(eval_params, eval_keys, {"hidden_state": eval_hs}) + jax.block_until_ready(eval_metrics) logger.log(eval_metrics, t, eval_idx, LogEvent.EVAL) episode_return = jnp.mean(eval_metrics["episode_return"]) @@ -655,7 +615,7 @@ def eval_act_fn( logger.stop() - return float(eval_performance) + return eval_performance @hydra.main( @@ -670,11 +630,11 @@ def hydra_entry_point(cfg: DictConfig) -> float: cfg.logger.system_name = "rec_iql" # Run experiment. - final_return = run_experiment(cfg) + eval_performance = run_experiment(cfg) print(f"{Fore.CYAN}{Style.BRIGHT}IDQN experiment completed{Style.RESET_ALL}") - return float(final_return) + return eval_performance if __name__ == "__main__": diff --git a/mava/systems/q_learning/anakin/rec_qmix.py b/mava/systems/q_learning/anakin/rec_qmix.py new file mode 100644 index 000000000..2b485bd09 --- /dev/null +++ b/mava/systems/q_learning/anakin/rec_qmix.py @@ -0,0 +1,689 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
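The rec_qmix system below follows the same recurrent IQL skeleton but mixes per-agent utilities into a single joint value with a state-conditioned, monotonic mixing network. A toy, self-contained sketch of the mixing idea only (this is not Mava's QMixingNetwork; it just illustrates the standard QMIX trick of generating non-negative weights from the global state so that dQ_tot/dQ_i >= 0 for every agent):

import jax
import jax.numpy as jnp
from flax import linen as nn

class ToyMixer(nn.Module):
    """Mixes per-agent Qs (B, N) into Q_tot (B, 1), monotone in each agent's Q."""
    embed_dim: int = 32

    @nn.compact
    def __call__(self, agent_qs: jnp.ndarray, state: jnp.ndarray) -> jnp.ndarray:
        n_agents = agent_qs.shape[-1]
        # Hypernetworks: weights are produced from the global state and made
        # non-negative with abs() to enforce monotonicity.
        w1 = jnp.abs(nn.Dense(n_agents * self.embed_dim)(state))
        w1 = w1.reshape(-1, n_agents, self.embed_dim)
        b1 = nn.Dense(self.embed_dim)(state)
        w2 = jnp.abs(nn.Dense(self.embed_dim)(state)).reshape(-1, self.embed_dim, 1)
        b2 = nn.Dense(1)(nn.relu(nn.Dense(self.embed_dim)(state)))

        hidden = nn.elu(jnp.einsum("bn,bne->be", agent_qs, w1) + b1)
        return jnp.einsum("be,beo->bo", hidden, w2) + b2  # (B, 1)

params = ToyMixer().init(jax.random.PRNGKey(0), jnp.ones((4, 3)), jnp.ones((4, 10)))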
+ +import copy +import time +from typing import Any, Callable, Dict, Tuple + +import chex +import flashbax as fbx +import hydra +import jax +import jax.lax as lax +import jax.numpy as jnp +import optax +from colorama import Fore, Style +from flashbax.buffers.flat_buffer import TrajectoryBuffer +from flax.core.scope import FrozenVariableDict +from flax.linen import FrozenDict +from jax import Array, tree +from jumanji.types import TimeStep +from omegaconf import DictConfig, OmegaConf +from rich.pretty import pprint + +from mava.evaluator import ActorState, get_eval_fn, get_num_eval_envs +from mava.networks import RecQNetwork, ScannedRNN +from mava.networks.base import QMixingNetwork +from mava.systems.q_learning.types import ( + ActionSelectionState, + ActionState, + LearnerState, + Metrics, + QMIXParams, + TrainState, + Transition, +) +from mava.types import MarlEnv, Observation +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + switch_leading_axes, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.wrappers import episode_metrics + + +def init( + cfg: DictConfig, +) -> Tuple[ + Tuple[MarlEnv, MarlEnv], + RecQNetwork, + QMixingNetwork, + optax.GradientTransformation, + TrajectoryBuffer, + LearnerState, + MavaLogger, + chex.PRNGKey, +]: + """Initialize system by creating the envs, networks etc.""" + logger = MavaLogger(cfg) + + # init key, get devices available + key = jax.random.PRNGKey(cfg.system.seed) + devices = jax.devices() + + def replicate(x: Any) -> Any: + """First replicate the update batch dim then put on devices.""" + x = tree.map(lambda y: jnp.broadcast_to(y, (cfg.system.update_batch_size, *y.shape)), x) + return jax.device_put_replicated(x, devices) + + env, eval_env = environments.make(cfg, add_global_state=True) + + action_dim = env.action_dim + num_agents = env.num_agents + + key, q_key = jax.random.split(key, 2) + + # Shape legend: + # T: Time + # B: Batch + # N: Agent + + # Make dummy inputs to init recurrent Q network -> need shape (T, B, N, ...) + init_obs = env.observation_spec().generate_value() # (N, ...) + # (B, T, N, ...) + init_obs_batched = tree.map(lambda x: x[jnp.newaxis, jnp.newaxis, ...], init_obs) + init_term_or_trunc = jnp.zeros((1, 1, 1), dtype=bool) # (T, B, 1) + init_x = (init_obs_batched, init_term_or_trunc) + # (B, N, ...) 
+ init_hidden_state = ScannedRNN.initialize_carry( + (cfg.arch.num_envs, num_agents), cfg.network.hidden_state_dim + ) + + # Making recurrent Q network + pre_torso = hydra.utils.instantiate(cfg.network.q_network.pre_torso) + post_torso = hydra.utils.instantiate(cfg.network.q_network.post_torso) + q_net = RecQNetwork( + pre_torso=pre_torso, + post_torso=post_torso, + num_actions=action_dim, + hidden_state_dim=cfg.network.hidden_state_dim, + ) + q_params = q_net.init(q_key, init_hidden_state, init_x) + q_target_params = q_net.init(q_key, init_hidden_state, init_x) + + # Make Mixer Network + dummy_agent_qs = jnp.zeros( + ( + cfg.system.sample_batch_size, + cfg.system.sample_sequence_length - 1, + num_agents, + ), + dtype=float, + ) + global_env_state_shape = ( + env.observation_spec().generate_value().global_state[0, :].shape + ) # NOTE: Env wrapper currently duplicates env state for each agent + dummy_global_env_state = jnp.zeros( + ( + cfg.system.sample_batch_size, + cfg.system.sample_sequence_length - 1, + *global_env_state_shape, + ), + dtype=float, + ) + q_mixer = hydra.utils.instantiate( + cfg.network.mixer_network, + num_actions=action_dim, + num_agents=num_agents, + embed_dim=cfg.system.qmix_embed_dim, + ) + mixer_online_params = q_mixer.init(q_key, dummy_agent_qs, dummy_global_env_state) + mixer_target_params = q_mixer.init(q_key, dummy_agent_qs, dummy_global_env_state) + + # Pack params + params = QMIXParams(q_params, q_target_params, mixer_online_params, mixer_target_params) + + # Optimiser + opt = optax.chain( + optax.adam(learning_rate=cfg.system.q_lr), + ) + opt_state = opt.init((params.online, params.mixer_online)) + + # Distribute params, opt states and hidden states across all devices + params = replicate(params) + opt_state = replicate(opt_state) + init_hidden_state = replicate(init_hidden_state) + + init_acts = env.action_spec().generate_value() + + # NOTE: term_or_trunc refers to the the joint done, ie. when all agents are done or when the + # episode horizon has been reached. We use this exclusively in QMIX. + # Terminal refers to individual agent dones. We keep this here for consistency with IQL. + init_transition = Transition( + obs=init_obs, # (N, ...) 
+ action=init_acts, # (N,) + reward=jnp.zeros((1,), dtype=float), + terminal=jnp.zeros((1,), dtype=bool), + term_or_trunc=jnp.zeros((1,), dtype=bool), + next_obs=init_obs, + ) + + # Initialise trajectory buffer + rb = fbx.make_trajectory_buffer( + # n transitions gives n-1 full data points + sample_sequence_length=cfg.system.sample_sequence_length, + period=1, # sample any unique trajectory + add_batch_size=cfg.arch.num_envs, + sample_batch_size=cfg.system.sample_batch_size, + max_length_time_axis=cfg.system.buffer_size, + min_length_time_axis=cfg.system.min_buffer_size, + ) + buffer_state = rb.init(init_transition) + buffer_state = replicate(buffer_state) + + # Reset env + n_keys = cfg.arch.num_envs * cfg.arch.n_devices * cfg.system.update_batch_size + key_shape = (cfg.arch.n_devices, cfg.system.update_batch_size, cfg.arch.num_envs, -1) + key, reset_key = jax.random.split(key) + reset_keys = jax.random.split(reset_key, n_keys) + reset_keys = jnp.reshape(reset_keys, key_shape) + + # Get initial state and timestep per-device + env_state, first_timestep = jax.pmap( # devices + jax.vmap( # update_batch_size + jax.vmap(env.reset), # num_envs + axis_name="batch", + ), + axis_name="device", + )(reset_keys) + first_obs = first_timestep.observation + first_term_or_trunc = first_timestep.last()[..., jnp.newaxis] + first_term = (1 - first_timestep.discount[..., 0, jnp.newaxis]).astype(bool) + + # Initialise env steps and training steps + t0_act = jnp.zeros((cfg.arch.n_devices, cfg.system.update_batch_size), dtype=int) + t0_train = jnp.zeros((cfg.arch.n_devices, cfg.system.update_batch_size), dtype=int) + + # Keys passed to learner + first_keys = jax.random.split(key, (cfg.arch.n_devices * cfg.system.update_batch_size)) + first_keys = first_keys.reshape((cfg.arch.n_devices, cfg.system.update_batch_size, -1)) + + # Initial learner state. + learner_state = LearnerState( + first_obs, + first_term, + first_term_or_trunc, + init_hidden_state, + env_state, + t0_act, + t0_train, + opt_state, + buffer_state, + params, + first_keys, + ) + + return (env, eval_env), q_net, q_mixer, opt, rb, learner_state, logger, key + + +def make_update_fns( + cfg: DictConfig, + env: MarlEnv, + q_net: RecQNetwork, + mixer: QMixingNetwork, + opt: optax.GradientTransformation, + rb: TrajectoryBuffer, +) -> Callable[[LearnerState[QMIXParams]], Tuple[LearnerState[QMIXParams], Tuple[Metrics, Metrics]]]: + def select_eps_greedy_action( + action_selection_state: ActionSelectionState, + obs: Observation, + term_or_trunc: Array, + ) -> Tuple[ActionSelectionState, Array]: + """Select action to take in eps-greedy way. Batch and agent dims are included.""" + + params, hidden_state, t, key = action_selection_state + + eps = jnp.maximum( + cfg.system.eps_min, 1 - (t / cfg.system.eps_decay) * (1 - cfg.system.eps_min) + ) + + obs = tree.map(lambda x: x[jnp.newaxis, ...], obs) + term_or_trunc = tree.map(lambda x: x[jnp.newaxis, ...], term_or_trunc) + + next_hidden_state, eps_greedy_dist = q_net.apply( + params, hidden_state, (obs, term_or_trunc), eps + ) + + new_key, explore_key = jax.random.split(key, 2) + + action = eps_greedy_dist.sample(seed=explore_key) + action = action[0, ...] 
# (1, B, N) -> (B, N)
+
+        # Repack new selection params
+        next_action_selection_state = ActionSelectionState(
+            params, next_hidden_state, t + cfg.arch.num_envs, new_key
+        )
+        return next_action_selection_state, action
+
+    def action_step(action_state: ActionState, _: Any) -> Tuple[ActionState, Dict]:
+        """Selects an action, steps the global env, stores the timestep in the global rb and
+        repacks the parameters for the next step.
+        """
+
+        action_selection_state, env_state, buffer_state, obs, terminal, term_or_trunc = action_state
+
+        next_action_selection_state, action = select_eps_greedy_action(
+            action_selection_state, obs, term_or_trunc
+        )
+
+        next_env_state, next_timestep = jax.vmap(env.step)(env_state, action)
+
+        # Get reward
+        # NOTE: Combine agent rewards, since QMIX is cooperative.
+        reward = jnp.mean(next_timestep.reward, axis=-1, keepdims=True)
+
+        transition = Transition(
+            obs, action, reward, terminal, term_or_trunc, next_timestep.extras["real_next_obs"]
+        )
+        # Add dummy time dim
+        transition = tree.map(lambda x: x[:, jnp.newaxis, ...], transition)
+        next_buffer_state = rb.add(buffer_state, transition)
+
+        next_obs = next_timestep.observation
+        # Make compatible with network input and transition storage in next step
+        next_terminal = (1 - next_timestep.discount[..., 0, jnp.newaxis]).astype(bool)
+        next_term_or_trunc = next_timestep.last()[..., jnp.newaxis]
+
+        new_act_state = ActionState(
+            next_action_selection_state,
+            next_env_state,
+            next_buffer_state,
+            next_obs,
+            next_terminal,
+            next_term_or_trunc,
+        )
+
+        return new_act_state, next_timestep.extras["episode_metrics"]
+
+    def prep_inputs_to_scannedrnn(
+        obs: Observation, term_or_trunc: chex.Array
+    ) -> Tuple[chex.Array, Tuple[Observation, chex.Array]]:
+        """Prepares the inputs to the RNN network for either getting q values or the
+        eps-greedy distribution.
+
+        Mostly swaps leading axes because the replay buffer outputs (B, T, ...)
+        and the RNN takes in (T, B, ...).
+        """
+        hidden_state = ScannedRNN.initialize_carry(
+            (cfg.system.sample_batch_size, obs.agents_view.shape[2]), cfg.network.hidden_state_dim
+        )
+        # The rb outputs (B, T, ...); the RNN takes in (T, B, ...).
+        obs = switch_leading_axes(obs)  # (B, T) -> (T, B)
+        term_or_trunc = switch_leading_axes(term_or_trunc)  # (B, T) -> (T, B)
+        obs_term_or_trunc = (obs, term_or_trunc)
+
+        return hidden_state, obs_term_or_trunc
+
+    def q_loss_fn(
+        online_params: FrozenVariableDict,
+        obs: Array,
+        term_or_trunc: Array,
+        action: Array,
+        target: Array,
+    ) -> Tuple[Array, Metrics]:
+        """The portion of the calculation to grad, namely online apply and MSE with target."""
+        q_online_params, online_mixer_params = online_params
+
+        # Axes switched to scan over time
+        hidden_state, obs_term_or_trunc = prep_inputs_to_scannedrnn(obs, term_or_trunc)
+
+        # Get online q values of all actions
+        _, q_online = q_net.apply(
+            q_online_params, hidden_state, obs_term_or_trunc, method="get_q_values"
+        )
+        q_online = switch_leading_axes(q_online)  # (T, B, ...) -> (B, T, ...)
+        # Get the q values of the taken actions and remove the extra dim
+        q_online = jnp.squeeze(
+            jnp.take_along_axis(q_online, action[..., jnp.newaxis], axis=-1), axis=-1
+        )
+
+        # NOTE: States are replicated over agents, so we only take the first one.
+        q_online = mixer.apply(
+            online_mixer_params, q_online, obs.global_state[:, :, 0, ...]
+        )  # (B, T, N, ...) -> (B, T, 1, ...)
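+
+        # NOTE: This is the QMIX mixing step: per-agent chosen-action q values
+        # (B, T, N) are combined into a single joint value Q_tot (B, T, 1),
+        # conditioned on the global state. In the standard QMIX formulation the
+        # mixer's weights are produced by hypernetworks from the state and kept
+        # non-negative, so dQ_tot/dq_i >= 0 and the joint greedy action
+        # factorises into each agent's individual greedy action.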
+ + q_loss = jnp.mean((q_online - target) ** 2) + + q_error = q_online - target + loss_info = { + "q_loss": q_loss, + "mean_q": jnp.mean(q_online), + "max_q_error": jnp.max(jnp.abs(q_error) ** 2), + "min_q_error": jnp.min(jnp.abs(q_error) ** 2), + "mean_target": jnp.mean(target), + } + + return q_loss, loss_info + + def update_q( + params: QMIXParams, opt_states: optax.OptState, data_full: Transition, t_train: int + ) -> Tuple[QMIXParams, optax.OptState, Metrics]: + """Update the Q parameters.""" + + # Get data aligned with current/next timestep + data = tree.map(lambda x: x[:, :-1, ...], data_full) # (B, T, ...) + data_next = tree.map(lambda x: x[:, 1:, ...], data_full) # (B, T, ...) + + reward = data.reward + next_done = data_next.term_or_trunc + + # Get the greedy action using the distribution. + # Epsilon defaults to 0. + hidden_state, next_obs_term_or_trunc = prep_inputs_to_scannedrnn( + data_full.obs, data_full.term_or_trunc + ) # (T, B, ...) + _, next_greedy_dist = q_net.apply(params.online, hidden_state, next_obs_term_or_trunc) + next_action = next_greedy_dist.mode() # (T, B, ...) + next_action = switch_leading_axes(next_action) # (T, B, ...) -> (B, T, ...) + next_action = next_action[:, 1:, ...] # (B, T, ...) + + hidden_state, next_obs_term_or_trunc = prep_inputs_to_scannedrnn( + data_full.obs, data_full.term_or_trunc + ) # (T, B, ...) + + _, next_q_vals_target = q_net.apply( + params.target, hidden_state, next_obs_term_or_trunc, method="get_q_values" + ) + next_q_vals_target = switch_leading_axes(next_q_vals_target) # (T, B, ...) -> (B, T, ...) + next_q_vals_target = next_q_vals_target[:, 1:, ...] # (B, T, ...) + + # Double q-value selection + next_q_val = jnp.squeeze( + jnp.take_along_axis(next_q_vals_target, next_action[..., jnp.newaxis], axis=-1), axis=-1 + ) + + next_q_val = mixer.apply( + params.mixer_target, next_q_val, data_next.obs.global_state[:, :, 0, ...] + ) # (B, T, N, ...) -> (B , T, 1 , ...) + + # TD Target + target_q_val = reward + (1.0 - next_done) * cfg.system.gamma * next_q_val + + q_grad_fn = jax.grad(q_loss_fn, has_aux=True) + q_grads, q_loss_info = q_grad_fn( + (params.online, params.mixer_online), + data.obs, + data.term_or_trunc, + data.action, + target_q_val, + ) + q_loss_info["mean_reward_t0"] = jnp.mean(reward) + q_loss_info["mean_next_qval"] = jnp.mean(next_q_val) + q_loss_info["done"] = jnp.mean(data_full.term_or_trunc) + + # Mean over the device and batch dimension. + q_grads, q_loss_info = lax.pmean((q_grads, q_loss_info), axis_name="device") + q_grads, q_loss_info = lax.pmean((q_grads, q_loss_info), axis_name="batch") + q_updates, next_opt_state = opt.update(q_grads, opt_states) + (next_online_params, next_mixer_params) = optax.apply_updates( + (params.online, params.mixer_online), q_updates + ) + + # Target network update. + if cfg.system.hard_update: + next_target_params = optax.periodic_update( + next_online_params, params.target, t_train, cfg.system.update_period + ) + next_mixer_target_params = optax.periodic_update( + next_mixer_params, params.mixer_target, t_train, cfg.system.update_period + ) + else: + next_target_params = optax.incremental_update( + next_online_params, params.target, cfg.system.tau + ) + next_mixer_target_params = optax.incremental_update( + next_mixer_params, params.mixer_target, cfg.system.tau + ) + # Repack params and opt_states. 
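+        # Field order follows QMIXParams(online, target, mixer_online, mixer_target),
+        # so the freshly updated mixer params slot in as the new online mixer params.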
+ next_params = QMIXParams( + next_online_params, + next_target_params, + next_mixer_params, + next_mixer_target_params, + ) + + return next_params, next_opt_state, q_loss_info + + def train( + train_state: TrainState[QMIXParams], _: Any + ) -> Tuple[TrainState[QMIXParams], Metrics]: + """Sample, train and repack.""" + + buffer_state, params, opt_states, t_train, key = train_state + next_key, buff_key = jax.random.split(key, 2) + + data = rb.sample(buffer_state, buff_key).experience + + # Learn + next_params, next_opt_states, q_loss_info = update_q(params, opt_states, data, t_train) + + next_train_state = TrainState( + buffer_state, next_params, next_opt_states, t_train + 1, next_key + ) + + return next_train_state, q_loss_info + + # ---- Act-train loop ---- + scanned_act = lambda state: lax.scan(action_step, state, None, length=cfg.system.rollout_length) + scanned_train = lambda state: lax.scan(train, state, None, length=cfg.system.epochs) + + # Act and train + def update_step( + learner_state: LearnerState[QMIXParams], _: Any + ) -> Tuple[LearnerState[QMIXParams], Tuple[Metrics, Metrics]]: + """Act, then learn.""" + + ( + obs, + terminal, + term_or_trunc, + hidden_state, + env_state, + time_steps, + train_steps, + opt_state, + buffer_state, + params, + key, + ) = learner_state + new_key, act_key, train_key = jax.random.split(key, 3) + + # Select actions, step env and store transitions + action_selection_state = ActionSelectionState( + params.online, hidden_state, time_steps, act_key + ) + action_state = ActionState( + action_selection_state, env_state, buffer_state, obs, terminal, term_or_trunc + ) + final_action_state, metrics = scanned_act(action_state) + + # Sample and learn + train_state = TrainState( + final_action_state.buffer_state, params, opt_state, train_steps, train_key + ) + final_train_state, losses = scanned_train(train_state) + + next_learner_state = LearnerState( + final_action_state.obs, + final_action_state.terminal, + final_action_state.term_or_trunc, + final_action_state.action_selection_state.hidden_state, + final_action_state.env_state, + final_action_state.action_selection_state.time_steps, + final_train_state.train_steps, + final_train_state.opt_state, + final_action_state.buffer_state, + final_train_state.params, + new_key, + ) + + return next_learner_state, (metrics, losses) + + pmaped_update_step = jax.pmap( + jax.vmap( + lambda state: lax.scan(update_step, state, None, length=cfg.system.scan_steps), + axis_name="batch", + ), + axis_name="device", + donate_argnums=0, + ) + + return pmaped_update_step + + +def run_experiment(cfg: DictConfig) -> float: + cfg.arch.n_devices = len(jax.devices()) + cfg = check_total_timesteps(cfg) + + # Number of env steps before evaluating/logging. + steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) + # Multiplier for a single env/learn step in an anakin system + anakin_steps = cfg.arch.n_devices * cfg.system.update_batch_size + # Number of env steps in one anakin style update. + anakin_act_steps = anakin_steps * cfg.arch.num_envs * cfg.system.rollout_length + # Number of steps to do in the scanned update method (how many anakin steps). 
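+    # For example (illustrative values, not the defaults): total_timesteps=1_000_000
+    # and num_evaluation=100 give steps_per_rollout=10_000; with 1 device,
+    # update_batch_size=2, num_envs=16 and rollout_length=8, anakin_act_steps is
+    # 1 * 2 * 16 * 8 = 256, so scan_steps = int(10_000 / 256) = 39.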
+ cfg.system.scan_steps = int(steps_per_rollout / anakin_act_steps) + + pprint(OmegaConf.to_container(cfg, resolve=True)) + + # Initialise system and make learning/evaluation functions + (env, eval_env), q_net, q_mixer, opts, rb, learner_state, logger, key = init(cfg) + update = make_update_fns(cfg, env, q_net, q_mixer, opts, rb) + + cfg.system.num_agents = env.num_agents + + key, eval_key = jax.random.split(key) + + def eval_act_fn( + params: FrozenDict, timestep: TimeStep, key: chex.PRNGKey, actor_state: ActorState + ) -> Tuple[chex.Array, ActorState]: + """The acting function that get's passed to the evaluator. + A custom function is needed for epsilon-greedy acting. + """ + hidden_state = actor_state["hidden_state"] + + term_or_trunc = timestep.last() + net_input = (timestep.observation, term_or_trunc[..., jnp.newaxis]) + net_input = tree.map(lambda x: x[jnp.newaxis], net_input) # add batch dim to obs + next_hidden_state, eps_greedy_dist = q_net.apply(params, hidden_state, net_input) + action = eps_greedy_dist.sample(seed=key).squeeze(0) + return action, {"hidden_state": next_hidden_state} + + evaluator = get_eval_fn(eval_env, eval_act_fn, cfg, absolute_metric=False) + + if cfg.logger.checkpointing.save_model: + checkpointer = Checkpointer( + metadata=cfg, # Save all config as metadata in the checkpoint + model_name=cfg.logger.system_name, + **cfg.logger.checkpointing.save_args, # Checkpoint args + ) + + # Create an initial hidden state used for resetting memory for evaluation + eval_batch_size = get_num_eval_envs(cfg, absolute_metric=False) + eval_hs = ScannedRNN.initialize_carry( + (jax.device_count(), eval_batch_size, cfg.system.num_agents), + cfg.network.hidden_state_dim, + ) + + max_episode_return = -jnp.inf + best_params = copy.deepcopy(unreplicate_batch_dim(learner_state.params.online)) + + # Main loop: + for eval_idx, t in enumerate( + range(steps_per_rollout, int(cfg.system.total_timesteps + 1), steps_per_rollout) + ): + # Learn loop: + start_time = time.time() + learner_state, (metrics, losses) = update(learner_state) + jax.block_until_ready(learner_state) + + # Log: + elapsed_time = time.time() - start_time + eps = jnp.maximum( + cfg.system.eps_min, 1 - (t / cfg.system.eps_decay) * (1 - cfg.system.eps_min) + ) + final_metrics, ep_completed = episode_metrics.get_final_step_metrics(metrics) + final_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + loss_metrics = losses + logger.log({"timestep": t, "epsilon": eps}, t, eval_idx, LogEvent.MISC) + if ep_completed: + logger.log(final_metrics, t, eval_idx, LogEvent.ACT) + logger.log(loss_metrics, t, eval_idx, LogEvent.TRAIN) + + # Evaluate: + key, eval_key = jax.random.split(key) + eval_keys = jax.random.split(eval_key, cfg.arch.n_devices) + eval_params = unreplicate_batch_dim(learner_state.params.online) + eval_metrics = evaluator(eval_params, eval_keys, {"hidden_state": eval_hs}) + jax.block_until_ready(eval_metrics) + logger.log(eval_metrics, t, eval_idx, LogEvent.EVAL) + episode_return = jnp.mean(eval_metrics["episode_return"]) + + # Save best actor params. 
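+        # Only tracked when the absolute metric is enabled; the best params are
+        # re-evaluated once more after training (see the absolute metric block below).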
+ if cfg.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(eval_params) + max_episode_return = episode_return + + # Checkpoint: + if cfg.logger.checkpointing.save_model: + # Save checkpoint of learner state + unreplicated_learner_state = unreplicate_n_dims(learner_state) + checkpointer.save( + timestep=t, + unreplicated_learner_state=unreplicated_learner_state, + episode_return=episode_return, + ) + + eval_performance = float(jnp.mean(eval_metrics[cfg.env.eval_metric])) + + # Measure absolute metric. + if cfg.arch.absolute_metric: + eval_keys = jax.random.split(key, cfg.arch.n_devices) + eval_batch_size = get_num_eval_envs(cfg, absolute_metric=True) + eval_hs = ScannedRNN.initialize_carry( + (jax.device_count(), eval_batch_size, cfg.system.num_agents), + cfg.network.hidden_state_dim, + ) + + abs_metric_evaluator = get_eval_fn(eval_env, eval_act_fn, cfg, absolute_metric=True) + eval_metrics = abs_metric_evaluator(best_params, eval_keys, {"hidden_state": eval_hs}) + logger.log(eval_metrics, t, eval_idx, LogEvent.ABSOLUTE) + + logger.stop() + + return eval_performance + + +@hydra.main( + config_path="../../../configs/default/", + config_name="rec_qmix.yaml", + version_base="1.2", +) +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + cfg.logger.system_name = "rec_qmix" + # Run experiment. + eval_performance = run_experiment(cfg) + + print(f"{Fore.CYAN}{Style.BRIGHT}QMIX experiment completed{Style.RESET_ALL}") + + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() diff --git a/mava/systems/q_learning/types.py b/mava/systems/q_learning/types.py index 8abf05ec4..8e0cd8125 100644 --- a/mava/systems/q_learning/types.py +++ b/mava/systems/q_learning/types.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, NamedTuple +from typing import Dict, Generic, TypeVar import optax from chex import PRNGKey @@ -19,7 +19,7 @@ from flax.core.scope import FrozenVariableDict from jax import Array from jumanji.env import State -from typing_extensions import TypeAlias +from typing_extensions import NamedTuple, TypeAlias from mava.types import Observation @@ -49,27 +49,6 @@ class QNetParams(NamedTuple): target: FrozenVariableDict -class LearnerState(NamedTuple): - """State of the learner in an interaction-training loop.""" - - # Interaction vars - obs: Observation - terminal: Array - term_or_trunc: Array - hidden_state: Array - env_state: State - time_steps: Array - - # Train vars - train_steps: Array - opt_state: optax.OptState - - # Shared vars - buffer_state: TrajectoryBufferState - params: QNetParams - key: PRNGKey - - class ActionSelectionState(NamedTuple): """Everything used for action selection apart from the observation.""" @@ -90,11 +69,42 @@ class ActionState(NamedTuple): term_or_trunc: Array -class TrainState(NamedTuple): +class QMIXParams(NamedTuple): + online: FrozenVariableDict + target: FrozenVariableDict + mixer_online: FrozenVariableDict + mixer_target: FrozenVariableDict + + +QLearningParams = TypeVar("QLearningParams", QNetParams, QMIXParams) + + +class LearnerState(NamedTuple, Generic[QLearningParams]): + """State of the learner in an interaction-training loop.""" + + # Interaction vars + obs: Observation + terminal: Array + term_or_trunc: Array + hidden_state: Array + env_state: State + time_steps: Array + + # Train vars + train_steps: Array + opt_state: optax.OptState + + # Shared vars + buffer_state: TrajectoryBufferState + params: QLearningParams + key: PRNGKey + + +class TrainState(NamedTuple, Generic[QLearningParams]): """The carry in the training loop.""" buffer_state: BufferState - params: QNetParams + params: QLearningParams opt_state: optax.OptState train_steps: Array key: PRNGKey diff --git a/mava/systems/sable/__init__.py b/mava/systems/sable/__init__.py new file mode 100644 index 000000000..21db9ec1c --- /dev/null +++ b/mava/systems/sable/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/mava/systems/sable/anakin/__init__.py b/mava/systems/sable/anakin/__init__.py new file mode 100644 index 000000000..21db9ec1c --- /dev/null +++ b/mava/systems/sable/anakin/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/mava/systems/sable/anakin/ff_sable.py b/mava/systems/sable/anakin/ff_sable.py new file mode 100644 index 000000000..bcd7dd3e0 --- /dev/null +++ b/mava/systems/sable/anakin/ff_sable.py @@ -0,0 +1,669 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import time +from functools import partial +from typing import Any, Callable, Dict, Tuple + +import chex +import flax +import hydra +import jax +import jax.numpy as jnp +import optax +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict as Params +from jax import tree +from jumanji.env import Environment +from jumanji.types import TimeStep +from omegaconf import DictConfig, OmegaConf +from rich.pretty import pprint + +from mava.evaluator import ActorState, EvalActFn, get_eval_fn, get_num_eval_envs +from mava.networks import SableNetwork +from mava.networks.utils.sable import get_init_hidden_state +from mava.systems.sable.types import ( + ActorApply, + LearnerApply, + Transition, +) +from mava.systems.sable.types import FFLearnerState as LearnerState +from mava.types import Action, ExperimentOutput, LearnerFn, MarlEnv +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import merge_leading_dims, unreplicate_batch_dim, unreplicate_n_dims +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +def get_learner_fn( + env: Environment, + apply_fns: Tuple[ActorApply, LearnerApply], + update_fn: optax.TransformUpdateFn, + config: DictConfig, +) -> LearnerFn[LearnerState]: + """Get the learner function.""" + + # Get apply functions for executing and training the network. + sable_action_select_fn, sable_apply_fn = apply_fns + + def _update_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, Tuple]: + """A single update of the network. + + This function steps the environment and records the trajectory batch for + training. It then calculates advantages and targets based on the recorded + trajectory and updates the actor and critic networks based on the calculated + losses. + + Args: + ---- + learner_state (NamedTuple): + - params (FrozenDict): The current model parameters. + - opt_states (OptState): The current optimizer states. + - key (PRNGKey): The random number generator state. + - env_state (State): The environment state. + - last_timestep (TimeStep): The last timestep in the current trajectory. + _ (Any): The current metrics info. 
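+
+        Returns:
+        -------
+            A tuple of the updated learner state and a tuple of
+            (episode metrics, loss info) collected over the rollout and update.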
+ + """ + + def _env_step(learner_state: LearnerState, _: int) -> Tuple[LearnerState, Transition]: + """Step the environment.""" + params, opt_states, key, env_state, last_timestep = learner_state + + # SELECT ACTION + key, policy_key = jax.random.split(key) + + # Apply the actor network to get the action, log_prob, value and updated hstates. + last_obs = last_timestep.observation + action, log_prob, value, _ = sable_action_select_fn( # type: ignore + params, + observation=last_obs, + key=policy_key, + ) + + # STEP ENVIRONMENT + env_state, timestep = jax.vmap(env.step, in_axes=(0, 0))(env_state, action) + + # LOG EPISODE METRICS + info = tree.map( + lambda x: jnp.repeat(x[..., jnp.newaxis], config.system.num_agents, axis=-1), + timestep.extras["episode_metrics"], + ) + + # SET TRANSITION + done = tree.map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + timestep.last(), + ) + transition = Transition( + done, + action, + value, + timestep.reward, + log_prob, + last_timestep.observation, + info, + ) + learner_state = LearnerState(params, opt_states, key, env_state, timestep) + return learner_state, transition + + # STEP ENVIRONMENT FOR ROLLOUT LENGTH + learner_state, traj_batch = jax.lax.scan( + _env_step, + learner_state, + jnp.arange(config.system.rollout_length), + config.system.rollout_length, + ) + + # CALCULATE ADVANTAGE + params, opt_states, key, env_state, last_timestep = learner_state + key, last_val_key = jax.random.split(key) + _, _, current_val, _ = sable_action_select_fn( # type: ignore + params, + observation=last_timestep.observation, + key=last_val_key, + ) + + def _calculate_gae( + traj_batch: Transition, + current_val: chex.Array, + ) -> Tuple[chex.Array, chex.Array]: + """Calculate the GAE.""" + + def _get_advantages( + carry: Tuple[chex.Array, chex.Array], transition: Transition + ) -> Tuple[Tuple[chex.Array, chex.Array], chex.Array]: + """Calculate the GAE for a single transition.""" + gae, next_value = carry + done, value, reward = ( + transition.done, + transition.value, + transition.reward, + ) + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - done) * gae + return (gae, value), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(current_val), current_val), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + advantages, targets = _calculate_gae(traj_batch, current_val) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_state = train_state + traj_batch, advantages, targets = batch_info + + def _loss_fn( + params: Params, + traj_batch: Transition, + gae: chex.Array, + value_targets: chex.Array, + ) -> Tuple: + """Calculate Sable loss.""" + # RERUN NETWORK + value, log_prob, entropy = sable_apply_fn( # type: ignore + params, + observation=traj_batch.obs, + action=traj_batch.action, + dones=traj_batch.done, + ) + + # CALCULATE ACTOR LOSS + ratio = jnp.exp(log_prob - traj_batch.log_prob) + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + 
loss_actor = loss_actor.mean() + entropy = entropy.mean() + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + -config.system.clip_eps, config.system.clip_eps + ) + + # MSE LOSS + value_losses = jnp.square(value - value_targets) + value_losses_clipped = jnp.square(value_pred_clipped - value_targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + # TOTAL LOSS + total_loss = ( + loss_actor + - config.system.ent_coef * entropy + + config.system.vf_coef * value_loss + ) + return total_loss, (loss_actor, entropy, value_loss) + + # CALCULATE ACTOR LOSS + grad_fn = jax.value_and_grad(_loss_fn, has_aux=True) + loss_info, grads = grad_fn( + params, + traj_batch, + advantages, + targets, + ) + + # Compute the parallel mean (pmean) over the batch. + # This calculation is inspired by the Anakin architecture demo notebook. + # available at https://tinyurl.com/26tdzs5x + # This pmean could be a regular mean as the batch axis is on the same device. + grads, loss_info = jax.lax.pmean((grads, loss_info), axis_name="batch") + # pmean over devices. + grads, loss_info = jax.lax.pmean((grads, loss_info), axis_name="device") + + # UPDATE PARAMS AND OPTIMISER STATE + updates, new_opt_state = update_fn(grads, opt_state) + new_params = optax.apply_updates(params, updates) + + # PACK LOSS INFO + total_loss = loss_info[0] + actor_loss = loss_info[1][0] + entropy = loss_info[1][1] + value_loss = loss_info[1][2] + loss_info = { + "total_loss": total_loss, + "value_loss": value_loss, + "actor_loss": actor_loss, + "entropy": entropy, + } + + return (new_params, new_opt_state), loss_info + + ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + ) = update_state + + # SHUFFLE MINIBATCHES + key, batch_shuffle_key, agent_shuffle_key = jax.random.split(key, 3) + + # Shuffle batch + batch_size = config.system.rollout_length * config.arch.num_envs + permutation = jax.random.permutation(batch_shuffle_key, batch_size) + batch = (traj_batch, advantages, targets) + batch = tree.map(lambda x: merge_leading_dims(x, 2), batch) + shuffled_batch = tree.map(lambda x: jnp.take(x, permutation, axis=0), batch) + + # Shuffle agents + agent_perm = jax.random.permutation(agent_shuffle_key, config.system.num_agents) + shuffled_batch = tree.map(lambda x: jnp.take(x, agent_perm, axis=1), shuffled_batch) + + # SPLIT INTO MINIBATCHES + minibatches = tree.map( + lambda x: jnp.reshape(x, (config.system.num_minibatches, -1, *x.shape[1:])), + shuffled_batch, + ) + + # UPDATE MINIBATCHES + (params, opt_states), loss_info = jax.lax.scan( + _update_minibatch, + (params, opt_states), + minibatches, + ) + + update_state = ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + ) + return update_state, loss_info + + update_state = ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + ) + + # UPDATE EPOCHS + update_state, loss_info = jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_states, traj_batch, advantages, targets, key = update_state + learner_state = LearnerState( + params, + opt_states, + key, + env_state, + last_timestep, + ) + metric = traj_batch.info + return learner_state, (metric, loss_info) + + def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: + """Learner function. + + This function represents the learner, it updates the network parameters + by iteratively applying the `_update_step` function for a fixed number of + updates. 
The `_update_step` function is vectorized over a batch of inputs. + + Args: + ---- + learner_state (NamedTuple): + - params (FrozenDict): The initial model parameters. + - opt_state (OptState): The initial optimizer state. + - key (chex.PRNGKey): The random number generator state. + - env_state (LogEnvState): The environment state. + - timesteps (TimeStep): The initial timestep in the initial trajectory. + + """ + batched_update_step = jax.vmap(_update_step, in_axes=(0, None), axis_name="batch") + + learner_state, (episode_info, loss_info) = jax.lax.scan( + batched_update_step, learner_state, None, config.system.num_updates_per_eval + ) + return ExperimentOutput( + learner_state=learner_state, + episode_metrics=episode_info, + train_metrics=loss_info, + ) + + return learner_fn + + +def learner_setup( + env: MarlEnv, keys: chex.Array, config: DictConfig +) -> Tuple[LearnerFn[LearnerState], Callable, LearnerState]: + """Initialise learner_fn, network, optimiser, environment and states.""" + # Get available TPU cores. + n_devices = len(jax.devices()) + + # Get number of agents. + config.system.num_agents = env.num_agents + + # PRNG keys. + key, net_key = keys + + # Get number of agents and actions. + action_dim = int(env.action_spec().num_values[0]) + n_agents = env.action_spec().shape[0] + config.system.num_agents = n_agents + config.system.num_actions = action_dim + + # Setting the chunksize - many agent problems require chunking agents + # Create a dummy decay factor for FF Sable + config.network.memory_config.decay_scaling_factor = 1.0 + if config.network.memory_config.agents_chunk_size: + config.network.memory_config.chunk_size = config.network.memory_config.agents_chunk_size + err = "Number of agents should be divisible by chunk size" + assert n_agents % config.network.memory_config.chunk_size == 0, err + else: + config.network.memory_config.chunk_size = n_agents + + # Set positional encoding to False, since ff-sable does not use temporal dependencies. + config.network.memory_config.timestep_positional_encoding = False + + _, action_space_type = get_action_head(env) + + # Define network. + sable_network = SableNetwork( + n_agents=n_agents, + n_agents_per_chunk=config.network.memory_config.chunk_size, + action_dim=action_dim, + net_config=config.network.net_config, + memory_config=config.network.memory_config, + action_space_type=action_space_type, + ) + + # Define optimiser. + lr = make_learning_rate(config.system.actor_lr, config) + optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(lr, eps=1e-5), + ) + + # Get mock inputs to initialise network. + init_obs = env.observation_spec().generate_value() + init_obs = tree.map(lambda x: x[jnp.newaxis, ...], init_obs) # Add batch dim + init_hs = get_init_hidden_state(config.network.net_config, config.arch.num_envs) + init_hs = tree.map(lambda x: x[0, jnp.newaxis], init_hs) + + # Initialise params and optimiser state. + params = sable_network.init( + net_key, + init_obs, + init_hs, + net_key, + method="get_actions", + ) + opt_state = optim.init(params) + + # Create fake hstates + minibatch_size = ( + config.arch.num_envs * config.system.rollout_length // config.system.num_minibatches + ) + dummy_actor_hs = get_init_hidden_state(config.network.net_config, config.arch.num_envs) + dummy_trainer_hs = get_init_hidden_state(config.network.net_config, minibatch_size) + + # Pack apply and update functions. + # Using dummy hstates, since we are not updating the hstates during training. 
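+    # FF Sable has no memory over time (timestep positional encoding is disabled
+    # above), so the hidden state is only a per-timestep carry across agent chunks;
+    # correctly shaped zero dummies are therefore sufficient here.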
+ apply_fns = ( + partial( + sable_network.apply, method="get_actions", hstates=dummy_actor_hs + ), # Execution function + partial(sable_network.apply, hstates=dummy_trainer_hs), # Training function + ) + eval_apply_fn = partial(sable_network.apply, method="get_actions") + + # Get batched iterated update and replicate it to pmap it over cores. + learn = get_learner_fn(env, apply_fns, optim.update, config) + learn = jax.pmap(learn, axis_name="device") + + # Initialise environment states and timesteps: across devices and batches. + key, *env_keys = jax.random.split( + key, n_devices * config.system.update_batch_size * config.arch.num_envs + 1 + ) + env_states, timesteps = jax.vmap(env.reset, in_axes=(0))( + jnp.stack(env_keys), + ) + reshape_states = lambda x: x.reshape( + (n_devices, config.system.update_batch_size, config.arch.num_envs) + x.shape[1:] + ) + # (devices, update batch size, num_envs, ...) + env_states = tree.map(reshape_states, env_states) + timesteps = tree.map(reshape_states, timesteps) + + # Load model from checkpoint if specified. + if config.logger.checkpointing.load_model: + loaded_checkpoint = Checkpointer( + model_name=config.logger.system_name, + **config.logger.checkpointing.load_args, # Other checkpoint args + ) + # Restore the learner state from the checkpoint + restored_params, _ = loaded_checkpoint.restore_params(input_params=params) + # Update the params + params = restored_params + + # Define params to be replicated across devices and batches. + key, step_keys = jax.random.split(key) + replicate_learner = (params, opt_state, step_keys) + + # Duplicate learner for update_batch_size. + broadcast = lambda x: jnp.broadcast_to(x, (config.system.update_batch_size, *x.shape)) + replicate_learner = tree.map(broadcast, replicate_learner) + + # Duplicate learner across devices. + replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=jax.devices()) + + # Initialise learner state. + params, opt_state, step_keys = replicate_learner + + init_learner_state = LearnerState( + params=params, + opt_states=opt_state, + key=step_keys, + env_state=env_states, + timestep=timesteps, + ) + + return learn, eval_apply_fn, init_learner_state + + +def run_experiment(_config: DictConfig) -> float: + """Runs experiment.""" + config = copy.deepcopy(_config) + + n_devices = len(jax.devices()) + + # Create the enviroments for train and eval. + env, eval_env = environments.make(config) + + # PRNG keys. + key, key_e, net_key = jax.random.split(jax.random.PRNGKey(config.system.seed), num=3) + + # Setup learner. + learn, sable_execution_fn, learner_state = learner_setup(env, (key, net_key), config) + + # Setup evaluator. + def make_ff_sable_act_fn(actor_apply_fn: ActorApply) -> EvalActFn: + def eval_act_fn( + params: Params, timestep: TimeStep, key: chex.PRNGKey, actor_state: ActorState + ) -> Tuple[Action, Dict]: + output_action, _, _, _ = actor_apply_fn( # type: ignore + params, + observation=timestep.observation, + key=key, + ) + return output_action, {} + + return eval_act_fn + + # One key per device for evaluation. + eval_keys = jax.random.split(key_e, n_devices) + # Define Apply fn for evaluation. + # Create an hstate with only zeros. This will never be updated over timesteps, + # but will be updated between agents in a given timestep since ff_sable has no + # memory over time. 
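+    # Since it is never carried across timesteps, the same zero hstate can be
+    # reused for every evaluation; it only needs to match the evaluation batch size.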
+ eval_batch_size = get_num_eval_envs(config, absolute_metric=False) + eval_hs = get_init_hidden_state(config.network.net_config, eval_batch_size) + sable_execution_fn = partial(sable_execution_fn, hstates=eval_hs) + eval_act_fn = make_ff_sable_act_fn(sable_execution_fn) + # Create evaluator + evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) + + # Calculate total timesteps. + config = check_total_timesteps(config) + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." + + # Calculate number of updates per evaluation. + config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + steps_per_rollout = ( + n_devices + * config.system.num_updates_per_eval + * config.system.rollout_length + * config.system.update_batch_size + * config.arch.num_envs + ) + + # Logger setup + logger = MavaLogger(config) + cfg: Dict = OmegaConf.to_container(config, resolve=True) + cfg["arch"]["devices"] = jax.devices() + pprint(cfg) + + # Set up checkpointer + save_checkpoint = config.logger.checkpointing.save_model + if save_checkpoint: + checkpointer = Checkpointer( + metadata=config, # Save all config as metadata in the checkpoint + model_name=config.logger.system_name, + **config.logger.checkpointing.save_args, # Checkpoint args + ) + + # Run experiment for a total number of evaluations. + max_episode_return = -jnp.inf + best_params = None + for eval_step in range(config.arch.num_evaluation): + # Train. + start_time = time.time() + + learner_output = learn(learner_state) + jax.block_until_ready(learner_output) + + # Log the results of the training. + elapsed_time = time.time() - start_time + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + + # Separately log timesteps, actoring metrics and training metrics. + logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) + if ep_completed: # only log episode metrics if an episode was completed in the rollout. + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + + # Prepare for evaluation. + trained_params = unreplicate_batch_dim(learner_state.params) + key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) + eval_keys = jnp.stack(eval_keys) + eval_keys = eval_keys.reshape(n_devices, -1) + # Evaluate. + eval_metrics = evaluator(trained_params, eval_keys, {}) + logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) + episode_return = jnp.mean(eval_metrics["episode_return"]) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(trained_params) + max_episode_return = episode_return + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) + + # Measure absolute metric. 
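+    # The absolute metric re-evaluates the best parameters seen during training
+    # (rather than the final ones) over a separate, typically larger, evaluation
+    # batch, following the protocol referenced in the arch config.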
+ if config.arch.absolute_metric: + eval_batch_size = get_num_eval_envs(config, absolute_metric=True) + abs_hs = get_init_hidden_state(config.network.net_config, eval_batch_size) + sable_execution_fn = partial(sable_execution_fn, hstates=abs_hs) + eval_act_fn = make_ff_sable_act_fn(sable_execution_fn) + abs_metric_evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=True) + eval_keys = jax.random.split(key, n_devices) + + eval_metrics = abs_metric_evaluator(best_params, eval_keys, {}) + + t = int(steps_per_rollout * (eval_step + 1)) + logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + + +@hydra.main( + config_path="../../../configs/default", + config_name="ff_sable.yaml", + version_base="1.2", +) +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + cfg.logger.system_name = "ff_sable" + + # Run experiment. + eval_performance = run_experiment(cfg) + print(f"{Fore.CYAN}{Style.BRIGHT}FF Sable experiment completed{Style.RESET_ALL}") + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() diff --git a/mava/systems/sable/anakin/rec_sable.py b/mava/systems/sable/anakin/rec_sable.py new file mode 100644 index 000000000..5f1a4c16e --- /dev/null +++ b/mava/systems/sable/anakin/rec_sable.py @@ -0,0 +1,693 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import time +from functools import partial +from typing import Any, Callable, Dict, Tuple + +import chex +import flax +import hydra +import jax +import jax.numpy as jnp +import optax +from colorama import Fore, Style +from flax.core.frozen_dict import FrozenDict as Params +from jax import tree +from jumanji.env import Environment +from jumanji.types import TimeStep +from omegaconf import DictConfig, OmegaConf +from rich.pretty import pprint + +from mava.evaluator import ActorState, EvalActFn, get_eval_fn, get_num_eval_envs +from mava.networks import SableNetwork +from mava.networks.utils.sable import get_init_hidden_state +from mava.systems.sable.types import ( + ActorApply, + HiddenStates, + LearnerApply, + Transition, +) +from mava.systems.sable.types import RecLearnerState as LearnerState +from mava.types import Action, ExperimentOutput, LearnerFn, MarlEnv +from mava.utils import make_env as environments +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import concat_time_and_agents, unreplicate_batch_dim, unreplicate_n_dims +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.utils.training import make_learning_rate +from mava.wrappers.episode_metrics import get_final_step_metrics + + +def get_learner_fn( + env: Environment, + apply_fns: Tuple[ActorApply, LearnerApply], + update_fn: optax.TransformUpdateFn, + config: DictConfig, +) -> LearnerFn[LearnerState]: + """Get the learner function.""" + + # Get apply functions for executing and training the network. + sable_action_select_fn, sable_apply_fn = apply_fns + + def _update_step(learner_state: LearnerState, _: Any) -> Tuple[LearnerState, Tuple]: + """A single update of the network. + + This function steps the environment and records the trajectory batch for + training. It then calculates advantages and targets based on the recorded + trajectory and updates the actor and critic networks based on the calculated + losses. + + Args: + ---- + learner_state (NamedTuple): + - params (FrozenDict): The current model parameters. + - opt_states (OptState): The current optimizer states. + - key (PRNGKey): The random number generator state. + - env_state (State): The environment state. + - last_timestep (TimeStep): The last timestep in the current trajectory. + - hstates (HiddenStates): The hidden state of the network. + _ (Any): The current metrics info. + + """ + + def _env_step(learner_state: LearnerState, _: int) -> Tuple[LearnerState, Transition]: + """Step the environment.""" + params, opt_states, key, env_state, last_timestep, hstates = learner_state + + # SELECT ACTION + key, policy_key = jax.random.split(key) + + # Apply the actor network to get the action, log_prob, value and updated hstates. + last_obs = last_timestep.observation + action, log_prob, value, hstates = sable_action_select_fn( # type: ignore + params, + last_obs, + hstates, + policy_key, + ) + + # STEP ENVIRONMENT + env_state, timestep = jax.vmap(env.step, in_axes=(0, 0))(env_state, action) + + # LOG EPISODE METRICS + info = tree.map( + lambda x: jnp.repeat(x[..., jnp.newaxis], config.system.num_agents, axis=-1), + timestep.extras["episode_metrics"], + ) + + # Reset hidden state if done. 
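+            # Recurrent memory must not leak across episode boundaries: where an
+            # env has just terminated, its hidden state is zeroed before the next
+            # episode begins (the expand_dims below broadcasts `done` over the
+            # hidden state dimensions).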
+ done = timestep.last() + done = jnp.expand_dims(done, (1, 2, 3, 4)) + hstates = tree.map(lambda hs: jnp.where(done, jnp.zeros_like(hs), hs), hstates) + + # SET TRANSITION + prev_done = tree.map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + last_timestep.last(), + ) + transition = Transition( + prev_done, + action, + value, + timestep.reward, + log_prob, + last_timestep.observation, + info, + ) + learner_state = LearnerState(params, opt_states, key, env_state, timestep, hstates) + return learner_state, transition + + # COPY OLD HIDDEN STATES: TO BE USED IN THE TRAINING LOOP + prev_hstates = tree.map(lambda x: jnp.copy(x), learner_state.hstates) + + # STEP ENVIRONMENT FOR ROLLOUT LENGTH + learner_state, traj_batch = jax.lax.scan( + _env_step, + learner_state, + jnp.arange(config.system.rollout_length), + config.system.rollout_length, + ) + + # CALCULATE ADVANTAGE + params, opt_states, key, env_state, last_timestep, updated_hstates = learner_state + key, last_val_key = jax.random.split(key) + _, _, current_val, _ = sable_action_select_fn( # type: ignore + params, last_timestep.observation, updated_hstates, last_val_key + ) + current_done = tree.map( + lambda x: jnp.repeat(x, config.system.num_agents).reshape(config.arch.num_envs, -1), + last_timestep.last(), + ) + + def _calculate_gae( + traj_batch: Transition, + current_val: chex.Array, + current_done: chex.Array, + ) -> Tuple[chex.Array, chex.Array]: + """Calculate the GAE.""" + + def _get_advantages( + carry: Tuple[chex.Array, chex.Array, chex.Array], transition: Transition + ) -> Tuple[Tuple[chex.Array, chex.Array, chex.Array], chex.Array]: + """Calculate the GAE for a single transition.""" + gae, next_value, next_done = carry + done, value, reward = ( + transition.done, + transition.value, + transition.reward, + ) + gamma = config.system.gamma + delta = reward + gamma * next_value * (1 - next_done) - value + gae = delta + gamma * config.system.gae_lambda * (1 - next_done) * gae + return (gae, value, done), gae + + _, advantages = jax.lax.scan( + _get_advantages, + (jnp.zeros_like(current_val), current_val, current_done), + traj_batch, + reverse=True, + unroll=16, + ) + return advantages, advantages + traj_batch.value + + advantages, targets = _calculate_gae(traj_batch, current_val, current_done) + + def _update_epoch(update_state: Tuple, _: Any) -> Tuple: + """Update the network for a single epoch.""" + + def _update_minibatch(train_state: Tuple, batch_info: Tuple) -> Tuple: + """Update the network for a single minibatch.""" + # UNPACK TRAIN STATE AND BATCH INFO + params, opt_state = train_state + traj_batch, advantages, targets, prev_hstates = batch_info + + def _loss_fn( + params: Params, + traj_batch: Transition, + gae: chex.Array, + value_targets: chex.Array, + prev_hstates: HiddenStates, + ) -> Tuple: + """Calculate Sable loss.""" + # RERUN NETWORK + value, log_prob, entropy = sable_apply_fn( # type: ignore + params, + traj_batch.obs, + traj_batch.action, + prev_hstates, + traj_batch.done, + ) + + # CALCULATE ACTOR LOSS + ratio = jnp.exp(log_prob - traj_batch.log_prob) + gae = (gae - gae.mean()) / (gae.std() + 1e-8) + loss_actor1 = ratio * gae + loss_actor2 = ( + jnp.clip( + ratio, + 1.0 - config.system.clip_eps, + 1.0 + config.system.clip_eps, + ) + * gae + ) + loss_actor = -jnp.minimum(loss_actor1, loss_actor2) + loss_actor = loss_actor.mean() + entropy = entropy.mean() + + # CALCULATE VALUE LOSS + value_pred_clipped = traj_batch.value + (value - traj_batch.value).clip( + 
-config.system.clip_eps, config.system.clip_eps + ) + + # MSE LOSS + value_losses = jnp.square(value - value_targets) + value_losses_clipped = jnp.square(value_pred_clipped - value_targets) + value_loss = 0.5 * jnp.maximum(value_losses, value_losses_clipped).mean() + + # TOTAL LOSS + total_loss = ( + loss_actor + - config.system.ent_coef * entropy + + config.system.vf_coef * value_loss + ) + return total_loss, (loss_actor, entropy, value_loss) + + # CALCULATE ACTOR LOSS + grad_fn = jax.value_and_grad(_loss_fn, has_aux=True) + loss_info, grads = grad_fn( + params, + traj_batch, + advantages, + targets, + prev_hstates, + ) + + # Compute the parallel mean (pmean) over the batch. + # This calculation is inspired by the Anakin architecture demo notebook. + # available at https://tinyurl.com/26tdzs5x + # This pmean could be a regular mean as the batch axis is on the same device. + grads, loss_info = jax.lax.pmean((grads, loss_info), axis_name="batch") + # pmean over devices. + grads, loss_info = jax.lax.pmean((grads, loss_info), axis_name="device") + + # UPDATE PARAMS AND OPTIMISER STATE + updates, new_opt_state = update_fn(grads, opt_state) + new_params = optax.apply_updates(params, updates) + + # PACK LOSS INFO + total_loss = loss_info[0] + actor_loss = loss_info[1][0] + entropy = loss_info[1][1] + value_loss = loss_info[1][2] + loss_info = { + "total_loss": total_loss, + "value_loss": value_loss, + "actor_loss": actor_loss, + "entropy": entropy, + } + + return (new_params, new_opt_state), loss_info + + ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + prev_hstates, + ) = update_state + + # SHUFFLE MINIBATCHES + key, batch_shuffle_key, agent_shuffle_key = jax.random.split(key, 3) + + # Shuffle batch + batch_size = config.arch.num_envs + batch_perm = jax.random.permutation(batch_shuffle_key, batch_size) + batch = (traj_batch, advantages, targets) + batch = tree.map(lambda x: jnp.take(x, batch_perm, axis=1), batch) + + # Shuffle hidden states + prev_hstates = tree.map(lambda x: jnp.take(x, batch_perm, axis=0), prev_hstates) + + # Shuffle agents + agent_perm = jax.random.permutation(agent_shuffle_key, config.system.num_agents) + batch = tree.map(lambda x: jnp.take(x, agent_perm, axis=2), batch) + + # CONCATENATE TIME AND AGENTS + batch = tree.map(concat_time_and_agents, batch) + + # SPLIT INTO MINIBATCHES + minibatches = tree.map( + lambda x: jnp.reshape(x, (config.system.num_minibatches, -1, *x.shape[1:])), + batch, + ) + prev_hs_minibatch = tree.map( + lambda x: jnp.reshape(x, (config.system.num_minibatches, -1, *x.shape[1:])), + prev_hstates, + ) + + # UPDATE MINIBATCHES + (params, opt_states), loss_info = jax.lax.scan( + _update_minibatch, + (params, opt_states), + (*minibatches, prev_hs_minibatch), + ) + + update_state = ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + prev_hstates, + ) + return update_state, loss_info + + update_state = ( + params, + opt_states, + traj_batch, + advantages, + targets, + key, + prev_hstates, + ) + + # UPDATE EPOCHS + update_state, loss_info = jax.lax.scan( + _update_epoch, update_state, None, config.system.ppo_epochs + ) + + params, opt_states, traj_batch, advantages, targets, key, _ = update_state + learner_state = LearnerState( + params, + opt_states, + key, + env_state, + last_timestep, + updated_hstates, + ) + metric = traj_batch.info + return learner_state, (metric, loss_info) + + def learner_fn(learner_state: LearnerState) -> ExperimentOutput[LearnerState]: + """Learner function. 
+ + This function represents the learner, it updates the network parameters + by iteratively applying the `_update_step` function for a fixed number of + updates. The `_update_step` function is vectorized over a batch of inputs. + + Args: + ---- + learner_state (NamedTuple): + - params (FrozenDict): The initial model parameters. + - opt_state (OptState): The initial optimizer state. + - key (chex.PRNGKey): The random number generator state. + - env_state (LogEnvState): The environment state. + - timesteps (TimeStep): The initial timestep in the initial trajectory. + - hstates (HiddenStates): The initial hidden states of the network. + + """ + batched_update_step = jax.vmap(_update_step, in_axes=(0, None), axis_name="batch") + + learner_state, (episode_info, loss_info) = jax.lax.scan( + batched_update_step, learner_state, None, config.system.num_updates_per_eval + ) + return ExperimentOutput( + learner_state=learner_state, + episode_metrics=episode_info, + train_metrics=loss_info, + ) + + return learner_fn + + +def learner_setup( + env: MarlEnv, keys: chex.Array, config: DictConfig +) -> Tuple[LearnerFn[LearnerState], Callable, LearnerState]: + """Initialise learner_fn, network, optimiser, environment and states.""" + # Get available TPU cores. + n_devices = len(jax.devices()) + + # Get number of agents. + config.system.num_agents = env.num_agents + + # PRNG keys. + key, net_key = keys + + # Get number of agents and actions. + action_dim = int(env.action_spec().num_values[0]) + n_agents = env.action_spec().shape[0] + config.system.num_agents = n_agents + config.system.num_actions = action_dim + + # Setting the chunksize - smaller chunks save memory at the cost of speed + if config.network.memory_config.timestep_chunk_size: + config.network.memory_config.chunk_size = ( + config.network.memory_config.timestep_chunk_size * n_agents + ) + else: + config.network.memory_config.chunk_size = config.system.rollout_length * n_agents + + _, action_space_type = get_action_head(env) + + # Define network. + sable_network = SableNetwork( + n_agents=n_agents, + n_agents_per_chunk=n_agents, + action_dim=action_dim, + net_config=config.network.net_config, + memory_config=config.network.memory_config, + action_space_type=action_space_type, + ) + + # Define optimiser. + lr = make_learning_rate(config.system.actor_lr, config) + optim = optax.chain( + optax.clip_by_global_norm(config.system.max_grad_norm), + optax.adam(lr, eps=1e-5), + ) + + # Get mock inputs to initialise network. + init_obs = env.observation_spec().generate_value() + init_obs = tree.map(lambda x: x[jnp.newaxis, ...], init_obs) # Add batch dim + init_hs = get_init_hidden_state(config.network.net_config, config.arch.num_envs) + init_hs = tree.map(lambda x: x[0, jnp.newaxis], init_hs) + + # Initialise params and optimiser state. + params = sable_network.init( + net_key, + init_obs, + init_hs, + net_key, + method="get_actions", + ) + opt_state = optim.init(params) + + # Pack apply and update functions. + apply_fns = ( + partial(sable_network.apply, method="get_actions"), # Execution function + sable_network.apply, # Training function + ) + + # Get batched iterated update and replicate it to pmap it over cores. + learn = get_learner_fn(env, apply_fns, optim.update, config) + learn = jax.pmap(learn, axis_name="device") + + # Initialise environment states and timesteps: across devices and batches. 
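+    # One reset key per (device, update batch, env), plus one key kept for later
+    # splits; e.g. 1 device, update_batch_size=2 and num_envs=16 gives 32 env keys
+    # and states reshaped to (1, 2, 16, ...).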
+ key, *env_keys = jax.random.split( + key, n_devices * config.system.update_batch_size * config.arch.num_envs + 1 + ) + env_states, timesteps = jax.vmap(env.reset, in_axes=(0))( + jnp.stack(env_keys), + ) + reshape_states = lambda x: x.reshape( + (n_devices, config.system.update_batch_size, config.arch.num_envs) + x.shape[1:] + ) + # (devices, update batch size, num_envs, ...) + env_states = tree.map(reshape_states, env_states) + timesteps = tree.map(reshape_states, timesteps) + + # Initialise hidden state. + init_hstates = get_init_hidden_state(config.network.net_config, config.arch.num_envs) + + # Load model from checkpoint if specified. + if config.logger.checkpointing.load_model: + loaded_checkpoint = Checkpointer( + model_name=config.logger.system_name, + **config.logger.checkpointing.load_args, # Other checkpoint args + ) + # Restore the learner state from the checkpoint + restored_params, restored_hstates = loaded_checkpoint.restore_params( + input_params=params, restore_hstates=True, THiddenState=HiddenStates + ) + # Update the params and hidden states + params = restored_params + init_hstates = restored_hstates if restored_hstates else init_hstates + + # Define params to be replicated across devices and batches. + key, step_keys = jax.random.split(key) + replicate_learner = (params, opt_state, step_keys) + + # Duplicate learner for update_batch_size. + broadcast = lambda x: jnp.broadcast_to(x, (config.system.update_batch_size, *x.shape)) + replicate_learner = tree.map(broadcast, replicate_learner) + init_hstates = tree.map(broadcast, init_hstates) + + # Duplicate learner across devices. + replicate_learner = flax.jax_utils.replicate(replicate_learner, devices=jax.devices()) + init_hstates = flax.jax_utils.replicate(init_hstates, devices=jax.devices()) + + # Initialise learner state. + params, opt_state, step_keys = replicate_learner + + init_learner_state = LearnerState( + params=params, + opt_states=opt_state, + key=step_keys, + env_state=env_states, + timestep=timesteps, + hstates=init_hstates, + ) + + return learn, apply_fns[0], init_learner_state + + +def run_experiment(_config: DictConfig) -> float: + """Runs experiment.""" + config = copy.deepcopy(_config) + + n_devices = len(jax.devices()) + + # Create the enviroments for train and eval. + env, eval_env = environments.make(config) + + # PRNG keys. + key, key_e, net_key = jax.random.split(jax.random.PRNGKey(config.system.seed), num=3) + + # Setup learner. + learn, sable_execution_fn, learner_state = learner_setup(env, (key, net_key), config) + + # Setup evaluator. + def make_rec_sable_act_fn(actor_apply_fn: ActorApply) -> EvalActFn: + _hidden_state = "hidden_state" + + def eval_act_fn( + params: Params, timestep: TimeStep, key: chex.PRNGKey, actor_state: ActorState + ) -> Tuple[Action, Dict]: + hidden_state = actor_state[_hidden_state] + output_action, _, _, hidden_state = actor_apply_fn( # type: ignore + params, + timestep.observation, + hidden_state, + key, + ) + return output_action, {_hidden_state: hidden_state} + + return eval_act_fn + + # One key per device for evaluation. + eval_keys = jax.random.split(key_e, n_devices) + eval_act_fn = make_rec_sable_act_fn(sable_execution_fn) + evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=False) + + # Calculate total timesteps. + config = check_total_timesteps(config) + assert ( + config.system.num_updates > config.arch.num_evaluation + ), "Number of updates per evaluation must be less than total number of updates." 
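+    # Otherwise num_updates // num_evaluation below could truncate to zero and the
+    # learner would never train between evaluations.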
+ + # Calculate number of updates per evaluation. + config.system.num_updates_per_eval = config.system.num_updates // config.arch.num_evaluation + steps_per_rollout = ( + n_devices + * config.system.num_updates_per_eval + * config.system.rollout_length + * config.system.update_batch_size + * config.arch.num_envs + ) + + # Logger setup + logger = MavaLogger(config) + cfg: Dict = OmegaConf.to_container(config, resolve=True) + cfg["arch"]["devices"] = jax.devices() + pprint(cfg) + + # Set up checkpointer + save_checkpoint = config.logger.checkpointing.save_model + if save_checkpoint: + checkpointer = Checkpointer( + metadata=config, # Save all config as metadata in the checkpoint + model_name=config.logger.system_name, + **config.logger.checkpointing.save_args, # Checkpoint args + ) + + # Create an initial hidden state used for resetting memory for evaluation + eval_batch_size = get_num_eval_envs(config, absolute_metric=False) + eval_hs = get_init_hidden_state(config.network.net_config, eval_batch_size) + eval_hs = flax.jax_utils.replicate(eval_hs, devices=jax.devices()) + + # Run experiment for a total number of evaluations. + max_episode_return = -jnp.inf + best_params = None + for eval_step in range(config.arch.num_evaluation): + # Train. + start_time = time.time() + + learner_output = learn(learner_state) + jax.block_until_ready(learner_output) + + # Log the results of the training. + elapsed_time = time.time() - start_time + t = int(steps_per_rollout * (eval_step + 1)) + episode_metrics, ep_completed = get_final_step_metrics(learner_output.episode_metrics) + episode_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + + # Separately log timesteps, actoring metrics and training metrics. + logger.log({"timestep": t}, t, eval_step, LogEvent.MISC) + if ep_completed: # only log episode metrics if an episode was completed in the rollout. + logger.log(episode_metrics, t, eval_step, LogEvent.ACT) + logger.log(learner_output.train_metrics, t, eval_step, LogEvent.TRAIN) + + # Prepare for evaluation. + trained_params = unreplicate_batch_dim(learner_state.params) + key_e, *eval_keys = jax.random.split(key_e, n_devices + 1) + eval_keys = jnp.stack(eval_keys) + eval_keys = eval_keys.reshape(n_devices, -1) + # Evaluate. + eval_metrics = evaluator(trained_params, eval_keys, {"hidden_state": eval_hs}) + logger.log(eval_metrics, t, eval_step, LogEvent.EVAL) + episode_return = jnp.mean(eval_metrics["episode_return"]) + + if save_checkpoint: + # Save checkpoint of learner state + checkpointer.save( + timestep=steps_per_rollout * (eval_step + 1), + unreplicated_learner_state=unreplicate_n_dims(learner_output.learner_state), + episode_return=episode_return, + ) + + if config.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(trained_params) + max_episode_return = episode_return + + # Update runner state to continue training. + learner_state = learner_output.learner_state + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(eval_metrics[config.env.eval_metric])) + + # Measure absolute metric. 
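The absolute-metric evaluation that follows reuses the best parameters seen during training. A small illustrative sketch of that bookkeeping, where `params_at_step` and `evaluate` are hypothetical stand-ins for the trained parameters and the evaluator:

```python
import copy

import jax
import jax.numpy as jnp

# Hypothetical stand-ins, not part of the system above.
def params_at_step(step: int) -> dict:
    return {"w": jnp.full((2,), float(step))}

def evaluate(params: dict, key: jax.Array) -> float:
    return float(jnp.sum(params["w"]) + jax.random.normal(key))

key = jax.random.PRNGKey(0)
max_episode_return = -jnp.inf
best_params = None
for step in range(5):
    key, eval_key = jax.random.split(key)
    params = params_at_step(step)
    episode_return = evaluate(params, eval_key)
    if episode_return >= max_episode_return:  # keep the best params seen so far
        best_params = copy.deepcopy(params)
        max_episode_return = episode_return
# `best_params` is then handed to the absolute-metric evaluator, as in the block below.
```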
+ if config.arch.absolute_metric: + eval_batch_size = get_num_eval_envs(config, absolute_metric=True) + abs_hs = get_init_hidden_state(config.network.net_config, eval_batch_size) + abs_hs = tree.map(lambda x: x[jnp.newaxis], abs_hs) + abs_metric_evaluator = get_eval_fn(eval_env, eval_act_fn, config, absolute_metric=True) + eval_keys = jax.random.split(key, n_devices) + + eval_metrics = abs_metric_evaluator(best_params, eval_keys, {"hidden_state": abs_hs}) + + t = int(steps_per_rollout * (eval_step + 1)) + logger.log(eval_metrics, t, eval_step, LogEvent.ABSOLUTE) + + # Stop the logger. + logger.stop() + + return eval_performance + + +@hydra.main( + config_path="../../../configs/default", + config_name="rec_sable.yaml", + version_base="1.2", +) +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + cfg.logger.system_name = "rec_sable" + + # Run experiment. + eval_performance = run_experiment(cfg) + print(f"{Fore.CYAN}{Style.BRIGHT}Rec Sable experiment completed{Style.RESET_ALL}") + return eval_performance + + +if __name__ == "__main__": + hydra_entry_point() diff --git a/mava/systems/sable/types.py b/mava/systems/sable/types.py new file mode 100644 index 000000000..c93d3bf48 --- /dev/null +++ b/mava/systems/sable/types.py @@ -0,0 +1,79 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Callable, Dict, Tuple + +from chex import Array, PRNGKey +from flax.core.frozen_dict import FrozenDict +from jumanji.types import TimeStep +from optax._src.base import OptState +from typing_extensions import NamedTuple + + +class SableNetworkConfig(NamedTuple): + """Configuration for the Sable network.""" + + n_block: int + n_head: int + embed_dim: int + + +class HiddenStates(NamedTuple): + """Hidden states for the encoder and decoder.""" + + encoder: Array + decoder_self_retn: Array + decoder_cross_retn: Array + + +class RecLearnerState(NamedTuple): + """State of the learner for Memory Sable""" + + params: FrozenDict + opt_states: OptState + key: PRNGKey + env_state: Array + timestep: TimeStep + hstates: HiddenStates + + +class FFLearnerState(NamedTuple): + """State of the learner for ff-Sable""" + + params: FrozenDict + opt_states: OptState + key: PRNGKey + env_state: Array + timestep: TimeStep + + +class Transition(NamedTuple): + """Transition tuple.""" + + done: Array + action: Array + value: Array + reward: Array + log_prob: Array + obs: Array + info: Dict + + +ActorApply = Callable[ + [FrozenDict, Array, Array, HiddenStates, PRNGKey], + Tuple[Array, Array, Array, Array, HiddenStates], +] +LearnerApply = Callable[ + [FrozenDict, Array, Array, Array, HiddenStates, Array, PRNGKey], Tuple[Array, Array, Array] +] diff --git a/mava/systems/sac/anakin/ff_hasac.py b/mava/systems/sac/anakin/ff_hasac.py new file mode 100644 index 000000000..0ea26ba9e --- /dev/null +++ b/mava/systems/sac/anakin/ff_hasac.py @@ -0,0 +1,729 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import time +from typing import Any, Callable, Dict, Tuple, Union + +import chex +import flashbax as fbx +import hydra +import jax +import jax.lax as lax +import jax.numpy as jnp +import optax +from colorama import Fore, Style +from flashbax.buffers.flat_buffer import TrajectoryBuffer +from flax.core import FrozenDict +from flax.core.scope import FrozenVariableDict +from jax import Array, tree +from jumanji.env import State +from jumanji.types import TimeStep +from omegaconf import DictConfig, OmegaConf +from rich.pretty import pprint + +from mava.evaluator import ActorState, get_eval_fn +from mava.networks import FeedForwardActor as Actor +from mava.networks import FeedForwardQNet as QNetwork +from mava.systems.sac.types import ( + BufferState, + LearnerState, + Metrics, + Networks, + Optimisers, + OptStates, + QVals, + QValsAndTarget, + SacParams, + Transition, +) +from mava.types import Action, MarlEnv, Observation, ObservationGlobalState +from mava.utils import make_env as environments +from mava.utils.centralised_training import get_joint_action +from mava.utils.checkpointing import Checkpointer +from mava.utils.jax_utils import ( + tree_at_set, + tree_slice, + unreplicate_batch_dim, + unreplicate_n_dims, +) +from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head +from mava.utils.total_timestep_checker import check_total_timesteps +from mava.wrappers import episode_metrics + +# General shape comment guideline: +# B: batch size +# N: num agents +# A: action dim + + +# It is faster to do this with a vmap, but unfortunately that requires switching to numpyro. +# This requires a lot of testing so there is currently an issue for it: #1098 +def get_actions( + actor_params: FrozenVariableDict, + actor_net: Actor, + keys: chex.PRNGKey, + num_agents: int, + action_dim: int, + obs: Union[Observation, ObservationGlobalState], +) -> Tuple[chex.Array, chex.Array]: + batch_size = obs.agents_view.shape[0] + + actions = jnp.zeros((batch_size, num_agents, action_dim)) + log_std = jnp.zeros((batch_size, num_agents)) + + for agent in range(num_agents): + actor_params_per_agent = tree.map(lambda x, agent=agent: x[agent], actor_params) + obs_per_agent = tree.map(lambda x, agent=agent: x[:, agent], obs) + + pi = actor_net.apply(actor_params_per_agent, obs_per_agent) + action = pi.sample(seed=keys[agent]) + actions = actions.at[:, agent].set(action) + log_std = log_std.at[:, agent].set(pi.log_prob(action)) + + return actions, log_std + + +def init( + cfg: DictConfig, +) -> Tuple[ + Tuple[MarlEnv, MarlEnv], + Networks, + Optimisers, + TrajectoryBuffer, + LearnerState, + Array, + MavaLogger, + chex.PRNGKey, +]: + """Initialize system by creating the envs, networks etc. + + Args: + ---- + cfg: System configuration. + + Returns: + ------- + Tuple containing: + Tuple[MarlEnv, MarlEnv]: The environment and evaluation environment. + Networks: Tuple of actor and critic networks. + Optimisers: Tuple of actor, critic and alpha optimisers. + TrajectoryBuffer: The replay buffer. + LearnerState: The initial learner state. + Array: The target entropy. + MavaLogger: The logger. + PRNGKey: The random key. 
+ """ + logger = MavaLogger(cfg) + + key = jax.random.PRNGKey(cfg.system.seed) + devices = jax.devices() + + def replicate(x: Any) -> Any: + """First replicate the update batch dim then put on devices.""" + x = tree.map(lambda y: jnp.broadcast_to(y, (cfg.system.update_batch_size, *y.shape)), x) + return jax.device_put_replicated(x, devices) + + env, eval_env = environments.make(cfg, add_global_state=True) + + n_agents = env.num_agents + action_dim = env.action_dim + + key, actor_key, q1_key, q2_key, q1_target_key, q2_target_key = jax.random.split(key, 6) + actor_keys = jax.random.split(actor_key, n_agents) + + acts = env.action_spec().generate_value() # all agents actions + act_single = acts[0] # single agents action + concat_acts = jnp.concatenate([act_single for _ in range(n_agents)], axis=0) + concat_acts_batched = concat_acts[jnp.newaxis, ...] # batch + concat of all agents actions + obs = env.observation_spec().generate_value() + obs_single_batched = tree.map(lambda x: x[0][jnp.newaxis, ...], obs) + + # Making actor network + actor_torso = hydra.utils.instantiate(cfg.network.actor_network.pre_torso) + action_head, _ = get_action_head(env) + actor_action_head = hydra.utils.instantiate( + action_head, action_dim=env.action_dim, independent_std=False + ) + actor_network = Actor(actor_torso, actor_action_head) + # `vmap` creates separate parameters per agent. + actor_params = jax.vmap(actor_network.init, in_axes=(0, None))(actor_keys, obs_single_batched) + + # Making Q networks + critic_torso = hydra.utils.instantiate(cfg.network.critic_network.pre_torso) + q_network = QNetwork(critic_torso, centralised_critic=True) + q1_params = q_network.init(q1_key, obs_single_batched, concat_acts_batched) + q2_params = q_network.init(q2_key, obs_single_batched, concat_acts_batched) + # obs_single_batched contains the global state which the QNetwork's condition on + q1_target_params = q_network.init(q1_target_key, obs_single_batched, concat_acts_batched) + q2_target_params = q_network.init(q2_target_key, obs_single_batched, concat_acts_batched) + + # Automatic entropy tuning + target_entropy = -cfg.system.target_entropy_scale * action_dim + target_entropy = jnp.repeat(target_entropy, n_agents).astype(float) + # making sure we have shape=(B, N) so broacasting works fine + target_entropy = target_entropy[jnp.newaxis, :] + if cfg.system.autotune: + log_alpha = jnp.zeros_like(target_entropy) + else: + log_alpha = jnp.log(cfg.system.init_alpha) + log_alpha = jnp.broadcast_to(log_alpha, target_entropy.shape) + + # Pack params + online_q_params = QVals(q1_params, q2_params) + target_q_params = QVals(q1_target_params, q2_target_params) + params = SacParams(actor_params, QValsAndTarget(online_q_params, target_q_params), log_alpha) + + # Make opt states. 
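The actor above gets one parameter set per agent by vmapping `init` over a batch of PRNG keys. A minimal sketch of that pattern with a stand-in `flax` module (names and sizes are illustrative only):

```python
import flax.linen as nn
import jax
import jax.numpy as jnp
from jax import tree

n_agents = 3
model = nn.Dense(8)  # stand-in for the actor network
keys = jax.random.split(jax.random.PRNGKey(0), n_agents)
dummy_obs = jnp.zeros((1, 4))  # a single batched observation

# in_axes=(0, None): one key per agent, shared dummy input,
# so every parameter leaf gains a leading agent axis.
per_agent_params = jax.vmap(model.init, in_axes=(0, None))(keys, dummy_obs)
print(tree.map(jnp.shape, per_agent_params))  # e.g. kernel: (3, 4, 8), bias: (3, 8)
```

The optimiser states created in the next hunk follow the same convention: `jax.vmap(actor_opt.init)` is applied over the stacked per-agent parameters.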
+ grad_clip = optax.clip_by_global_norm(cfg.system.max_grad_norm) + + actor_opt = optax.chain(grad_clip, optax.adam(cfg.system.policy_lr)) + actor_opt_state = jax.vmap(actor_opt.init)(params.actor) + + q_opt = optax.chain(grad_clip, optax.adam(cfg.system.q_lr)) + q_opt_state = q_opt.init(params.q.online) + + alpha_opt = optax.chain(grad_clip, optax.adam(cfg.system.alpha_lr)) + alpha_opt_state = jax.vmap(alpha_opt.init)(params.log_alpha) + + # Pack opt states + opt_states = OptStates(actor_opt_state, q_opt_state, alpha_opt_state) + + # Distribute params and opt states across all devices + params = replicate(params) + opt_states = replicate(opt_states) + + # Create replay buffer + init_transition = Transition( + obs=obs, + action=acts, + reward=jnp.zeros((n_agents,), dtype=float), + done=jnp.zeros((n_agents,), dtype=bool), + next_obs=obs, + ) + + rb = fbx.make_item_buffer( + max_length=int(cfg.system.buffer_size), + min_length=int(cfg.system.explore_steps), + sample_batch_size=int(cfg.system.batch_size), + add_batches=True, + ) + buffer_state = replicate(rb.init(init_transition)) + + networks = (actor_network, q_network) + optims = (actor_opt, q_opt, alpha_opt) + + # Reset env. + n_keys = cfg.arch.num_envs * cfg.arch.n_devices * cfg.system.update_batch_size + key_shape = (cfg.arch.n_devices, cfg.system.update_batch_size, cfg.arch.num_envs, -1) + key, reset_key = jax.random.split(key) + reset_keys = jax.random.split(reset_key, n_keys) + reset_keys = jnp.reshape(reset_keys, key_shape) + + # Keys passed to learner + first_keys = jax.random.split(key, (cfg.arch.n_devices * cfg.system.update_batch_size)) + first_keys = first_keys.reshape((cfg.arch.n_devices, cfg.system.update_batch_size, -1)) + + env_state, first_timestep = jax.pmap( # devices + jax.vmap( # update_batch_size + jax.vmap(env.reset), # num_envs + axis_name="batch", + ), + axis_name="device", + )(reset_keys) + first_obs = first_timestep.observation + + t = jnp.zeros((cfg.arch.n_devices, cfg.system.update_batch_size), dtype=int) + + # Initial learner state. + learner_state = LearnerState( + first_obs, env_state, buffer_state, params, opt_states, t, first_keys + ) + return (env, eval_env), networks, optims, rb, learner_state, target_entropy, logger, key + + +def make_update_fns( + cfg: DictConfig, + env: MarlEnv, + networks: Networks, + optims: Optimisers, + rb: TrajectoryBuffer, + target_entropy: chex.Array, +) -> Tuple[ + Callable[[LearnerState], Tuple[LearnerState, Metrics]], + Callable[[LearnerState], Tuple[LearnerState, Tuple[Metrics, Metrics]]], +]: + """Create the update functions for the learner. + + Args: + ---- + cfg: System configuration. + env: The environment. + networks: Tuple of actor and critic networks. + optims: Tuple of actor, critic and alpha optimisers. + rb: The replay buffer. + target_entropy: The target entropy. + + Returns: + ------- + Tuple of (explore_fn, update_fn). + Explore function is used for initial exploration with random actions. + Update function is the main learning function, it both acts and learns. 
+ """ + actor_net, q_net = networks + actor_opt, q_opt, alpha_opt = optims + + full_action_shape = (cfg.arch.num_envs, *env.action_spec().shape) + + # losses: + def q_loss_fn( + q_params: QVals, obs: Array, action: Array, target: Array + ) -> Tuple[Array, Metrics]: + q1_params, q2_params = q_params + # Concat all actions and tile them for num agents to create joint actions for all agents + joint_action = get_joint_action(action) # (B, N, A) -> (N, N, N * A) + + q1_a_values = q_net.apply(q1_params, obs, joint_action) + q2_a_values = q_net.apply(q2_params, obs, joint_action) + + q1_loss = jnp.mean(jnp.square(q1_a_values - target)) + q2_loss = jnp.mean(jnp.square(q2_a_values - target)) + + loss = q1_loss + q2_loss + loss_info = { + "loss": loss, + "q1_loss": q1_loss, + "q2_loss": q2_loss, + "q1_a_vals": q1_a_values, + "q2_a_vals": q2_a_values, + } + + return loss, loss_info + + def actor_loss_fn( + actor_params: FrozenVariableDict, + obs: ObservationGlobalState, + actions: Array, + alpha: Array, + q_params: QVals, + key: chex.PRNGKey, + agent_id: int, + ) -> Array: + batch_size = actions.shape[0] + pi = actor_net.apply(actor_params, obs) + new_actions = pi.sample(seed=key) + log_prob = pi.log_prob(new_actions) + + joint_actions = actions.at[:, agent_id, :].set(new_actions).reshape(batch_size, -1) + + qval_1 = q_net.apply(q_params.q1, obs, joint_actions) + qval_2 = q_net.apply(q_params.q2, obs, joint_actions) + min_q_val = jnp.minimum(qval_1, qval_2) + + return ((alpha[:, agent_id] * log_prob) - min_q_val).mean() + + def alpha_loss_fn(log_alpha: Array, log_pi: Array, target_entropy: Array) -> Array: + return jnp.mean(-jnp.exp(log_alpha) * (log_pi + target_entropy)) + + # Update functions: + def update_q( + params: SacParams, opt_states: OptStates, data: Transition, key: chex.PRNGKey + ) -> Tuple[SacParams, OptStates, Metrics]: + """Update the Q parameters.""" + # Calculate Q target values. + act_keys = jax.random.split(key, env.num_agents) + next_action, next_log_prob = get_actions( + params.actor, actor_net, act_keys, env.num_agents, env.action_dim, data.next_obs + ) + + # Concat all actions and tile them for num agents to create joint actions for all agents + joint_next_actions = get_joint_action(next_action) # (B, N, A) -> (B, N, N * A) + next_q1_val = q_net.apply(params.q.targets.q1, data.next_obs, joint_next_actions) + next_q2_val = q_net.apply(params.q.targets.q2, data.next_obs, joint_next_actions) + next_q_val = jnp.minimum(next_q1_val, next_q2_val) + next_q_val = next_q_val - jnp.exp(params.log_alpha) * next_log_prob + + target_q_val = data.reward + (1.0 - data.done) * cfg.system.gamma * next_q_val # (B, A, 1) + + # Update Q function. + q_grad_fn = jax.grad(q_loss_fn, has_aux=True) + q_grads, q_loss_info = q_grad_fn(params.q.online, data.obs, data.action, target_q_val) + # Mean over the device and batch dimension. + q_grads, q_loss_info = lax.pmean((q_grads, q_loss_info), axis_name="device") + q_grads, q_loss_info = lax.pmean((q_grads, q_loss_info), axis_name="batch") + q_updates, new_q_opt_state = q_opt.update(q_grads, opt_states.q) + new_online_q_params = optax.apply_updates(params.q.online, q_updates) + + # Target network polyak update. + new_target_q_params = optax.incremental_update( + new_online_q_params, params.q.targets, cfg.system.tau + ) + + # Repack params and opt_states. 
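`optax.incremental_update` above performs the usual polyak averaging of the target critics. A tiny self-contained check of what it computes, using illustrative pytrees only:

```python
import jax.numpy as jnp
import optax
from jax import tree

tau = 0.005
online = {"w": jnp.ones((2, 2))}
target = {"w": jnp.zeros((2, 2))}

# new_target = tau * online + (1 - tau) * old_target, applied leaf-wise.
new_target = optax.incremental_update(online, target, tau)
manual = tree.map(lambda o, t: tau * o + (1.0 - tau) * t, online, target)
assert jnp.allclose(new_target["w"], manual["w"])  # both are 0.005 everywhere
```

The hunk below then repacks the updated online and target values into `QValsAndTarget`.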
+ q_and_target = QValsAndTarget(new_online_q_params, new_target_q_params) + params = params._replace(q=q_and_target) + opt_states = opt_states._replace(q=new_q_opt_state) + + return params, opt_states, q_loss_info + + def update_actor_and_alpha( + params: SacParams, opt_states: OptStates, data: Transition, key: chex.PRNGKey + ) -> Tuple[SacParams, OptStates, Metrics]: + """Update the actor and alpha parameters. Compensated for the delay in policy updates.""" + alpha_grad_fn = jax.value_and_grad(alpha_loss_fn) + actor_grad_fn = jax.value_and_grad(actor_loss_fn) + + # compensate for the delay by doing `policy_frequency` updates instead of 1. + assert cfg.system.policy_update_delay > 0, "Need to have a policy update delay > 0." + for _ in range(cfg.system.policy_update_delay): + key, act_key, agent_order_key = jax.random.split(key, 3) + act_keys = jax.random.split(act_key, env.num_agents) + if cfg.system.shuffle_agents: + agent_ids = jax.random.permutation(agent_order_key, env.num_agents) + else: + agent_ids = jnp.arange(env.num_agents) + + # Joint actions and log probs per agent. + # These will be sequentially updated after each agent's grad step. + joint_actions, log_probs = get_actions( + params.actor, actor_net, act_keys, env.num_agents, env.action_dim, data.obs + ) # (B, N, A), (B, N) + + # HASAC sequential update: run the normal actor update one at a time instead of batched. + # Update the joint actions after updating the actor and use the new joint actions + # in subsequent updates. + for agent_id in agent_ids: + key, actor_key = jax.random.split(key) + + # Select current agent's params/opt/obs: (N, ...) -> (...) + agent_params = tree_slice(params.actor, agent_id) + agent_opt_state = tree_slice(opt_states.actor, agent_id) + # jnp.s_ allows passing slices as a variables + agent_obs = tree_slice(data.obs, jnp.s_[:, agent_id]) + + # Update actor. + act_loss, grads = actor_grad_fn( + agent_params, + agent_obs, + joint_actions, + jnp.exp(params.log_alpha), + params.q.online, + actor_key, + agent_id, + ) + # Mean over the device and batch dimensions. + act_loss, grads = lax.pmean((act_loss, grads), axis_name="device") + act_loss, grads = lax.pmean((act_loss, grads), axis_name="batch") + updates, new_agent_opt_state = actor_opt.update(grads, agent_opt_state) + new_agent_params = optax.apply_updates(agent_params, updates) + + # update actions list with new action from updated actor + pi = actor_net.apply(new_agent_params, agent_obs) + new_action = pi.sample(seed=key) + + # Add new action to list of actions + joint_actions = joint_actions.at[:, agent_id].set(new_action) + # Update global params and opt states + all_actor_params = tree_at_set(params.actor, agent_id, new_agent_params) + all_opt_states = tree_at_set(opt_states.actor, agent_id, new_agent_opt_state) + params = params._replace(actor=all_actor_params) + opt_states = opt_states._replace(actor=all_opt_states) + + # Update alpha if autotuning + alpha_loss = 0.0 # loss is 0 if autotune is off + if cfg.system.autotune: + alpha_opt_state = tree_slice(opt_states.alpha, agent_id) # (N, ...) -> (...) 
+ + alpha_loss, grads = alpha_grad_fn( + params.log_alpha[:, agent_id], + log_probs[:, agent_id], + target_entropy[:, agent_id], + ) + alpha_loss, grads = lax.pmean((alpha_loss, grads), axis_name="device") + alpha_loss, grads = lax.pmean((alpha_loss, grads), axis_name="batch") + updates, new_alpha_opt_state = alpha_opt.update(grads, alpha_opt_state) + new_log_alpha = optax.apply_updates(params.log_alpha[:, agent_id], updates) + # Update global params/opt states + new_log_alphas = tree_at_set(params.log_alpha, agent_id, new_log_alpha) + new_alpha_opt_states = tree_at_set( + opt_states.alpha, agent_id, new_alpha_opt_state + ) + params = params._replace(log_alpha=new_log_alphas) + opt_states = opt_states._replace(alpha=new_alpha_opt_states) + + loss_info = {"actor_loss": act_loss, "alpha_loss": alpha_loss} + return params, opt_states, loss_info + + # Act/learn loops: + def train( + carry: Tuple[BufferState, SacParams, OptStates, int, chex.PRNGKey], _: Any + ) -> Tuple[Tuple[BufferState, SacParams, OptStates, int, chex.PRNGKey], Metrics]: + """Update the Q function and optionally policy/alpha with TD3 delayed update.""" + buffer_state, params, opt_states, t, key = carry + key, buff_key, q_key, actor_key = jax.random.split(key, 4) + + # sample + data = rb.sample(buffer_state, buff_key).experience # (B, N, ...) + + # learn + params, opt_states, q_loss_info = update_q(params, opt_states, data, q_key) + params, opt_states, act_loss_info = lax.cond( + t % cfg.system.policy_update_delay == 0, # TD 3 Delayed update support + update_actor_and_alpha, + # just return same params and opt_states and 0 for losses + lambda params, opt_states, *_: ( + params, + opt_states, + {"actor_loss": 0.0, "alpha_loss": 0.0}, + ), + params, + opt_states, + data, + actor_key, + ) + + losses = q_loss_info | act_loss_info + + return (buffer_state, params, opt_states, t, key), losses + + # Acting + def step( + action: Array, obs: ObservationGlobalState, env_state: State, buffer_state: BufferState + ) -> Tuple[Array, State, BufferState, Dict]: + """Given an action, step the environment and add to the buffer.""" + env_state, timestep = jax.vmap(env.step)(env_state, action) + next_obs = timestep.observation + rewards = timestep.reward + terms = ~timestep.discount.astype(bool) + infos = timestep.extras + + real_next_obs = infos["real_next_obs"] + + transition = Transition(obs, action, rewards, terms, real_next_obs) + buffer_state = rb.add(buffer_state, transition) + + return next_obs, env_state, buffer_state, infos["episode_metrics"] + + def act( + carry: Tuple[FrozenVariableDict, Array, State, BufferState, chex.PRNGKey], _: Any + ) -> Tuple[Tuple[FrozenVariableDict, Array, State, BufferState, chex.PRNGKey], Dict]: + """Acting loop: select action, step env, add to buffer.""" + actor_params, obs, env_state, buffer_state, key = carry + key, act_key = jax.random.split(key) + act_keys = jax.random.split(act_key, env.num_agents) + + actions, _ = get_actions( + actor_params, actor_net, act_keys, env.num_agents, env.action_dim, obs + ) + + next_obs, env_state, buffer_state, metrics = step(actions, obs, env_state, buffer_state) + return (actor_params, next_obs, env_state, buffer_state, key), metrics + + def explore(carry: LearnerState, _: Any) -> Tuple[LearnerState, Metrics]: + """Take random actions to fill up buffer at the start of training.""" + obs, env_state, buffer_state, _, _, t, key = carry + # mypy thinks it's Observation | ObservationGlobalState + assert isinstance(obs, ObservationGlobalState) + + key, explore_key = 
jax.random.split(key) + action = jax.random.uniform(explore_key, full_action_shape) + next_obs, env_state, buffer_state, metrics = step(action, obs, env_state, buffer_state) + + t += cfg.arch.num_envs + learner_state = carry._replace( + obs=next_obs, env_state=env_state, buffer_state=buffer_state, t=t, key=key + ) + return learner_state, metrics + + scanned_train = lambda state: lax.scan(train, state, None, length=cfg.system.epochs) + scanned_act = lambda state: lax.scan(act, state, None, length=cfg.system.rollout_length) + + # Act loop -> sample -> update loop + def update_step(carry: LearnerState, _: Any) -> Tuple[LearnerState, Tuple[Metrics, Metrics]]: + """Act, sample, learn. The body of the main SAC loop.""" + obs, env_state, buffer_state, params, opt_states, t, key = carry + key, act_key, learn_key = jax.random.split(key, 3) + # Act + act_state = (params.actor, obs, env_state, buffer_state, act_key) + (_, next_obs, env_state, buffer_state, _), metrics = scanned_act(act_state) + + # Sample and learn + learn_state = (buffer_state, params, opt_states, t, learn_key) + (buffer_state, params, opt_states, _, _), losses = scanned_train(learn_state) + + t += cfg.arch.num_envs * cfg.system.rollout_length + return ( + LearnerState(next_obs, env_state, buffer_state, params, opt_states, t, key), + (metrics, losses), + ) + + # pmap and scan over explore and update_step + # Make sure to not do num_envs explore steps (could fill up the buffer too much). + explore_steps = cfg.system.explore_steps // cfg.arch.num_envs + pmaped_explore = jax.pmap( + jax.vmap( + lambda state: lax.scan(explore, state, None, length=explore_steps), + axis_name="batch", + ), + axis_name="device", + donate_argnums=0, + ) + pmaped_update_step = jax.pmap( + jax.vmap( + lambda state: lax.scan(update_step, state, None, length=cfg.system.scan_steps), + axis_name="batch", + ), + axis_name="device", + donate_argnums=0, + ) + + return pmaped_explore, pmaped_update_step + + +def run_experiment(cfg: DictConfig) -> float: + # Add runtime variables to config + cfg.arch.n_devices = len(jax.devices()) + cfg = check_total_timesteps(cfg) + + # Number of env steps before evaluating/logging. + steps_per_rollout = int(cfg.system.total_timesteps // cfg.arch.num_evaluation) + # Multiplier for a single env/learn step in an anakin system + anakin_steps = cfg.arch.n_devices * cfg.system.update_batch_size + # Number of env steps in one anakin style update. + anakin_act_steps = anakin_steps * cfg.arch.num_envs * cfg.system.rollout_length + # Number of steps to do in the scanned update method (how many anakin steps). + cfg.system.scan_steps = int(steps_per_rollout / anakin_act_steps) + + pprint(OmegaConf.to_container(cfg, resolve=True)) + + # Initialize system and make learning functions. 
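`pmaped_explore` and `pmaped_update_step` above nest `pmap` (devices) over `vmap` (update batch) over `lax.scan` (steps). A toy, self-contained sketch of that nesting, with a scalar carry standing in for the learner state:

```python
import jax
import jax.lax as lax
import jax.numpy as jnp

def one_step(carry, _):  # stand-in for a single act/learn step
    return carry + 1, carry

def rollout(carry):
    return lax.scan(one_step, carry, None, length=4)

n_devices = len(jax.devices())
update_batch_size = 2
# Carry shaped (devices, update_batch_size), matching how the learner state is replicated.
carry = jnp.zeros((n_devices, update_batch_size))

nested = jax.pmap(jax.vmap(rollout, axis_name="batch"), axis_name="device")
final_carry, ys = nested(carry)
print(final_carry.shape, ys.shape)  # (devices, batch) and (devices, batch, 4)
```

In the experiment below, `init` and `make_update_fns` build the real learner state and functions that play these roles.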
+ (env, eval_env), networks, optims, rb, learner_state, target_entropy, logger, key = init(cfg) + explore, update = make_update_fns(cfg, env, networks, optims, rb, target_entropy) + + actor, _ = networks + key, eval_key = jax.random.split(key) + + def eval_act_fn( + params: FrozenDict, timestep: TimeStep, key: chex.PRNGKey, actor_state: ActorState + ) -> Tuple[Action, Dict]: + keys = jax.random.split(key, eval_env.num_agents) + action, _ = get_actions( + params, actor, keys, eval_env.num_agents, eval_env.action_dim, timestep.observation + ) + return action, {} + + evaluator = get_eval_fn(eval_env, eval_act_fn, cfg, absolute_metric=False) + + if cfg.logger.checkpointing.save_model: + checkpointer = Checkpointer( + metadata=cfg, # Save all config as metadata in the checkpoint + model_name=cfg.logger.system_name, + **cfg.logger.checkpointing.save_args, # Checkpoint args + ) + + max_episode_return = -jnp.inf + start_time = time.time() + + # Fill up buffer/explore. + learner_state, metrics = explore(learner_state) + + # Log explore metrics. + t = int(jnp.sum(learner_state.t)) + sps = t / (time.time() - start_time) + logger.log({"step": t}, t, 0, LogEvent.MISC) + + # Don't mind if episode isn't completed here, nice to have the graphs start near 0. + # So we ignore the second return value. + final_metrics, _ = episode_metrics.get_final_step_metrics(metrics) + final_metrics["steps_per_second"] = sps + logger.log(final_metrics, cfg.system.explore_steps, 0, LogEvent.ACT) + + # Main loop: + start = cfg.system.explore_steps + stop = int(cfg.system.total_timesteps + 1) + for eval_idx, t in enumerate(range(start, stop, steps_per_rollout)): + # Learn loop: + start_time = time.time() + learner_state, (metrics, losses) = update(learner_state) + jax.block_until_ready(learner_state) + t += steps_per_rollout # Completed rollout so add to step count. + + # Log: + elapsed_time = time.time() - start_time + final_metrics, ep_completed = episode_metrics.get_final_step_metrics(metrics) + final_metrics["steps_per_second"] = steps_per_rollout / elapsed_time + loss_metrics = losses | {"log_alpha": learner_state.params.log_alpha} + + logger.log({"timestep": t}, t, eval_idx, LogEvent.MISC) + if ep_completed: + logger.log(final_metrics, t, eval_idx, LogEvent.ACT) + logger.log(loss_metrics, t, eval_idx, LogEvent.TRAIN) + + # Evaluate: + key, eval_key = jax.random.split(key) + eval_keys = jax.random.split(eval_key, cfg.arch.n_devices) + eval_metrics = evaluator(unreplicate_batch_dim(learner_state.params.actor), eval_keys, {}) + logger.log(eval_metrics, t, eval_idx, LogEvent.EVAL) + episode_return = jnp.mean(eval_metrics["episode_return"]) + + # Save best actor params. + if cfg.arch.absolute_metric and max_episode_return <= episode_return: + best_params = copy.deepcopy(unreplicate_batch_dim(learner_state.params.actor)) + max_episode_return = episode_return + + # Checkpoint: + if cfg.logger.checkpointing.save_model: + # Save checkpoint of learner state + unreplicated_learner_state = unreplicate_n_dims(learner_state) # type: ignore + checkpointer.save( + timestep=t, + unreplicated_learner_state=unreplicated_learner_state, + episode_return=episode_return, + ) + + # Record the performance for the final evaluation run. + eval_performance = float(jnp.mean(eval_metrics[cfg.env.eval_metric])) + + # Measure absolute metric. 
+ if cfg.arch.absolute_metric: + eval_keys = jax.random.split(key, cfg.arch.n_devices) + + abs_metric_evaluator = get_eval_fn(eval_env, eval_act_fn, cfg, absolute_metric=True) + eval_metrics = abs_metric_evaluator(best_params, eval_keys, {}) + + logger.log(eval_metrics, t, eval_idx, LogEvent.ABSOLUTE) + + logger.stop() + + return eval_performance + + +@hydra.main(config_path="../../../configs/default", config_name="ff_hasac.yaml", version_base="1.2") +def hydra_entry_point(cfg: DictConfig) -> float: + """Experiment entry point.""" + # Allow dynamic attributes. + OmegaConf.set_struct(cfg, False) + cfg.logger.system_name = "ff_hasac" + + # Run experiment. + final_return = run_experiment(cfg) + + print(f"{Fore.CYAN}{Style.BRIGHT}HASAC experiment completed{Style.RESET_ALL}") + + return float(final_return) + + +if __name__ == "__main__": + hydra_entry_point() diff --git a/mava/systems/sac/anakin/ff_isac.py b/mava/systems/sac/anakin/ff_isac.py index 9e54dde2d..e908a63b6 100644 --- a/mava/systems/sac/anakin/ff_isac.py +++ b/mava/systems/sac/anakin/ff_isac.py @@ -52,6 +52,7 @@ from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.wrappers import episode_metrics @@ -110,8 +111,9 @@ def replicate(x: Any) -> Any: # Making actor network actor_torso = hydra.utils.instantiate(cfg.network.actor_network.pre_torso) + action_head, _ = get_action_head(env) actor_action_head = hydra.utils.instantiate( - cfg.network.action_head, action_dim=action_dim, independent_std=False + action_head, action_dim=env.action_dim, independent_std=False ) actor_network = Actor(actor_torso, actor_action_head) actor_params = actor_network.init(actor_key, obs_single_batched) @@ -242,23 +244,6 @@ def make_update_fns( full_action_shape = (cfg.arch.num_envs, *env.action_spec().shape) - def step( - action: Array, obs: Observation, env_state: State, buffer_state: BufferState - ) -> Tuple[Array, State, BufferState, Dict]: - """Given an action, step the environment and add to the buffer.""" - env_state, timestep = jax.vmap(env.step)(env_state, action) - next_obs = timestep.observation - rewards = timestep.reward - terms = ~timestep.discount.astype(bool) - infos = timestep.extras - - real_next_obs = infos["real_next_obs"] - - transition = Transition(obs, action, rewards, terms, real_next_obs) - buffer_state = rb.add(buffer_state, transition) - - return next_obs, env_state, buffer_state, infos["episode_metrics"] - # losses: def q_loss_fn( q_params: QVals, obs: Array, action: Array, target: Array @@ -415,6 +400,24 @@ def train( return (buffer_state, params, opt_states, t, key), losses + # Acting + def step( + action: Array, obs: Observation, env_state: State, buffer_state: BufferState + ) -> Tuple[Array, State, BufferState, Dict]: + """Given an action, step the environment and add to the buffer.""" + env_state, timestep = jax.vmap(env.step)(env_state, action) + next_obs = timestep.observation + rewards = timestep.reward + terms = ~timestep.discount.astype(bool) + infos = timestep.extras + + real_next_obs = infos["real_next_obs"] + + transition = Transition(obs, action, rewards, terms, real_next_obs) + buffer_state = rb.add(buffer_state, transition) + + return next_obs, env_state, buffer_state, infos["episode_metrics"] + def act( carry: Tuple[FrozenVariableDict, Array, State, BufferState, chex.PRNGKey], _: Any ) -> Tuple[Tuple[FrozenVariableDict, Array, 
State, BufferState, chex.PRNGKey], Dict]: diff --git a/mava/systems/sac/anakin/ff_masac.py b/mava/systems/sac/anakin/ff_masac.py index d0c763760..425f98dee 100644 --- a/mava/systems/sac/anakin/ff_masac.py +++ b/mava/systems/sac/anakin/ff_masac.py @@ -53,6 +53,7 @@ from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.wrappers import episode_metrics @@ -113,8 +114,9 @@ def replicate(x: Any) -> Any: # Making actor network actor_torso = hydra.utils.instantiate(cfg.network.actor_network.pre_torso) + action_head, _ = get_action_head(env) actor_action_head = hydra.utils.instantiate( - cfg.network.action_head, action_dim=action_dim, independent_std=False + action_head, action_dim=env.action_dim, independent_std=False ) actor_network = Actor(actor_torso, actor_action_head) actor_params = actor_network.init(actor_key, obs_single_batched) @@ -245,23 +247,6 @@ def make_update_fns( full_action_shape = (cfg.arch.num_envs, *env.action_spec().shape) - def step( - action: Array, obs: ObservationGlobalState, env_state: State, buffer_state: BufferState - ) -> Tuple[Array, State, BufferState, Dict]: - """Given an action, step the environment and add to the buffer.""" - env_state, timestep = jax.vmap(env.step)(env_state, action) - next_obs = timestep.observation - rewards = timestep.reward - terms = ~timestep.discount.astype(bool) - infos = timestep.extras - - real_next_obs = infos["real_next_obs"] - - transition = Transition(obs, action, rewards, terms, real_next_obs) - buffer_state = rb.add(buffer_state, transition) - - return next_obs, env_state, buffer_state, infos["episode_metrics"] - # losses: def q_loss_fn( q_params: QVals, obs: Array, action: Array, target: Array @@ -432,6 +417,24 @@ def train( return (buffer_state, params, opt_states, t, key), losses + # Acting + def step( + action: Array, obs: ObservationGlobalState, env_state: State, buffer_state: BufferState + ) -> Tuple[Array, State, BufferState, Dict]: + """Given an action, step the environment and add to the buffer.""" + env_state, timestep = jax.vmap(env.step)(env_state, action) + next_obs = timestep.observation + rewards = timestep.reward + terms = ~timestep.discount.astype(bool) + infos = timestep.extras + + real_next_obs = infos["real_next_obs"] + + transition = Transition(obs, action, rewards, terms, real_next_obs) + buffer_state = rb.add(buffer_state, transition) + + return next_obs, env_state, buffer_state, infos["episode_metrics"] + def act( carry: Tuple[FrozenVariableDict, Array, State, BufferState, chex.PRNGKey], _: Any ) -> Tuple[Tuple[FrozenVariableDict, Array, State, BufferState, chex.PRNGKey], Dict]: @@ -566,9 +569,6 @@ def run_experiment(cfg: DictConfig) -> float: t += steps_per_rollout # Completed rollout so add to step count. # Log: - # Add learn steps here because anakin steps per second is learn + act steps - # But we also want to make sure we're counting env steps correctly so - # learn steps is not included in the loop counter. elapsed_time = time.time() - start_time final_metrics, ep_completed = episode_metrics.get_final_step_metrics(metrics) final_metrics["steps_per_second"] = steps_per_rollout / elapsed_time @@ -579,9 +579,6 @@ def run_experiment(cfg: DictConfig) -> float: logger.log(final_metrics, t, eval_idx, LogEvent.ACT) logger.log(loss_metrics, t, eval_idx, LogEvent.TRAIN) - # Prepare for evaluation. 
- start_time = time.time() - # Evaluate: key, eval_key = jax.random.split(key) eval_keys = jax.random.split(eval_key, cfg.arch.n_devices) diff --git a/mava/types.py b/mava/types.py index 8a191f5ab..4072629dc 100644 --- a/mava/types.py +++ b/mava/types.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Callable, Dict, Generic, Optional, Protocol, Tuple, TypeVar +from typing import Any, Callable, Dict, Generic, Optional, Protocol, Tuple, TypeVar, Union import chex import jumanji.specs as specs from flax.core.frozen_dict import FrozenDict -from jumanji import Environment from jumanji.types import TimeStep from tensorflow_probability.substrates.jax.distributions import Distribution from typing_extensions import NamedTuple, TypeAlias @@ -103,7 +102,7 @@ def discount_spec(self) -> specs.BoundedArray: ... @property - def unwrapped(self) -> Environment: + def unwrapped(self) -> Any: """Retuns: the innermost environment (without any wrappers applied).""" ... @@ -136,7 +135,7 @@ class ObservationGlobalState(NamedTuple): RNNObservation: TypeAlias = Tuple[Observation, Done] RNNGlobalObservation: TypeAlias = Tuple[ObservationGlobalState, Done] - +MavaObservation: TypeAlias = Union[Observation, ObservationGlobalState] # `MavaState` is the main type passed around in our systems. It is often used as a scan carry. # Types like: `LearnerState` (mava/systems//types.py) are `MavaState`s. diff --git a/mava/utils/checkpointing.py b/mava/utils/checkpointing.py index 8a3daf0e4..42d64aaa5 100644 --- a/mava/utils/checkpointing.py +++ b/mava/utils/checkpointing.py @@ -15,22 +15,20 @@ import os import warnings from datetime import datetime -from typing import Any, Dict, Optional, Tuple, Type, Union +from typing import Any, Dict, Optional, Tuple, Type import absl.logging as absl_logging import orbax.checkpoint from chex import Numeric -from flax.core.frozen_dict import FrozenDict from jax import tree from omegaconf import DictConfig, OmegaConf -from mava.systems.ppo.types import HiddenStates, Params from mava.types import MavaState # Keep track of the version of the checkpointer # Any breaking API changes should be reflected in the major version (e.g. v0.1 -> v1.0) # whereas minor versions (e.g. v0.1 -> v0.2) indicate backwards compatibility -CHECKPOINTER_VERSION = 1.0 +CHECKPOINTER_VERSION = 2.0 class Checkpointer: @@ -152,7 +150,7 @@ def restore_params( timestep: Optional[int] = None, restore_hstates: bool = False, THiddenState: Optional[Type] = None, # noqa: N803 - ) -> Tuple[Params, Union[HiddenStates, None]]: + ) -> Tuple[Any, Optional[Any]]: """Restore the params and the hidden state (in case of RNNs) Args: @@ -187,22 +185,13 @@ def restore_params( # The type of params to restore is the same type as the `input_params` TParams = type(input_params) # noqa: N806 - # Check the type of `input_params` for compatibility. - # This is a sanity check to ensure correct handling of parameter types. - # In Flax 0.6.11, parameters were typically of the `FrozenDict` type, - # but in later versions, a regular dictionary is used. 
- if isinstance(input_params.actor_params, FrozenDict): - restored_params = TParams(**FrozenDict(restored_learner_state_raw["params"])) - else: - restored_params = TParams(**restored_learner_state_raw["params"]) + # We no longer check if params are in a FrozenDict since we require Flax >= 0.8.1 + restored_params = TParams(**restored_learner_state_raw["params"]) # Restore hidden states if required restored_hstates = None if restore_hstates and THiddenState is not None: - if isinstance(input_params.actor_params, FrozenDict): - restored_hstates = THiddenState(**FrozenDict(restored_learner_state_raw["hstates"])) - else: - restored_hstates = THiddenState(**restored_learner_state_raw["hstates"]) + restored_hstates = THiddenState(**restored_learner_state_raw["hstates"]) return restored_params, restored_hstates diff --git a/mava/utils/jax_utils.py b/mava/utils/jax_utils.py index c89c6a4a4..2425210e9 100644 --- a/mava/utils/jax_utils.py +++ b/mava/utils/jax_utils.py @@ -14,13 +14,31 @@ # TODO: Rewrite this file to handle only JAX arrays. -from typing import Any +from typing import Any, Tuple, Union import chex import jax import jax.numpy as jnp import numpy as np from jax import tree +from typing_extensions import TypeAlias + +# Different types used for indexing arrays: int/slice or tuple of int/slice +Indexer: TypeAlias = Union[int, slice, Tuple[slice, ...], Tuple[int, ...]] + + +def tree_slice(pytree: chex.ArrayTree, i: Indexer) -> chex.ArrayTree: + """Returns: a new pytree where for each leaf: leaf[i] is returned.""" + return tree.map(lambda x: x[i], pytree) + + +def tree_at_set(old_tree: chex.ArrayTree, i: Indexer, new_tree: chex.ArrayTree) -> chex.ArrayTree: + """Update `old_tree` at position `i` with `new_tree`. + Both trees must have equal dtypes and structures. + """ + chex.assert_trees_all_equal_structs(old_tree, new_tree) + chex.assert_trees_all_equal_dtypes(old_tree, new_tree) + return tree.map(lambda old, new: old.at[i].set(new), old_tree, new_tree) def ndim_at_least(x: chex.Array, num_dims: chex.Numeric) -> chex.Array: @@ -49,6 +67,22 @@ def merge_leading_dims(x: chex.Array, num_dims: chex.Numeric) -> chex.Array: return x.reshape(new_shape) +def concat_time_and_agents(x: chex.Array) -> chex.Array: + """Concatenates the time and agent dimensions in the input tensor. + + Args: + ---- + x: Input tensor of shape (Time, Batch, Agents, ...). + + Returns: + ------- + chex.Array: Tensor of shape (Batch, Time x Agents, ...). + """ + x = jnp.moveaxis(x, 0, 1) + x = jnp.reshape(x, (x.shape[0], x.shape[1] * x.shape[2], *x.shape[3:])) + return x + + def unreplicate_n_dims(x: Any, unreplicate_depth: int = 2) -> Any: """Unreplicates a pytree by removing the first `unreplicate_depth` axes. 
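A short usage sketch of the `tree_slice` and `tree_at_set` helpers added above (assuming this patch is applied so they are importable from `mava.utils.jax_utils`); this select-one-agent and write-back pattern is what the HASAC sequential update relies on:

```python
import jax.numpy as jnp

from mava.utils.jax_utils import tree_at_set, tree_slice

# A stacked per-agent pytree: the leading axis indexes agents.
stacked = {"w": jnp.arange(6.0).reshape(3, 2)}

agent_1 = tree_slice(stacked, 1)              # {"w": [2., 3.]}
updated_agent_1 = {"w": agent_1["w"] + 10.0}  # pretend this came from a gradient step
stacked = tree_at_set(stacked, 1, updated_agent_1)
print(stacked["w"][1])                        # [12. 13.]
```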
diff --git a/mava/utils/logger.py b/mava/utils/logger.py index bd090604b..de7cbdc70 100644 --- a/mava/utils/logger.py +++ b/mava/utils/logger.py @@ -300,7 +300,7 @@ def log_dict(self, data: Dict, step: int, eval_step: int, event: LogEvent) -> No for value in data.values(): value = value.item() if isinstance(value, jax.Array) else value values.append(f"{value:.3f}" if isinstance(value, float) else str(value)) - log_str = " | ".join([f"{k}: {v}" for k, v in zip(keys, values)]) + log_str = " | ".join([f"{k}: {v}" for k, v in zip(keys, values, strict=True)]) self.logger.info( f"{colour}{Style.BRIGHT}{event.value.upper()} - {log_str}{Style.RESET_ALL}" @@ -346,7 +346,7 @@ def get_logger_path(config: DictConfig, logger_type: str) -> str: def describe(x: ArrayLike) -> Union[Dict[str, ArrayLike], ArrayLike]: """Generate summary statistics for an array of metrics (mean, std, min, max).""" - if not isinstance(x, (jax.Array, np.ndarray)) or x.size <= 1: + if not isinstance(x, (jax.Array, np.ndarray)) or x.ndim == 0: return x # np instead of jnp because we don't jit here diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 9d32112c9..8794093ac 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Tuple, Type +from typing import Tuple import gymnasium import gymnasium as gym @@ -56,23 +56,24 @@ SmaxWrapper, UoeWrapper, async_multiagent_worker, + VectorConnectorWrapper, ) -from mava.wrappers.jaxmarl import JaxMarlWrapper # Registry mapping environment names to their generator and wrapper classes. _jumanji_registry = { - "RobotWarehouse-v0": {"generator": RwareRandomGenerator, "wrapper": RwareWrapper}, - "LevelBasedForaging-v0": {"generator": LbfRandomGenerator, "wrapper": LbfWrapper}, - "MaConnector-v2": { + "RobotWarehouse": {"generator": RwareRandomGenerator, "wrapper": RwareWrapper}, + "LevelBasedForaging": {"generator": LbfRandomGenerator, "wrapper": LbfWrapper}, + "MaConnector": {"generator": ConnectorRandomGenerator, "wrapper": ConnectorWrapper}, + "VectorMaConnector": { "generator": ConnectorRandomGenerator, - "wrapper": ConnectorWrapper, + "wrapper": VectorConnectorWrapper, }, - "Cleaner-v0": {"generator": CleanerRandomGenerator, "wrapper": CleanerWrapper}, + "Cleaner": {"generator": CleanerRandomGenerator, "wrapper": CleanerWrapper}, } # Registry mapping environment names directly to the corresponding wrapper classes. _matrax_registry = {"Matrax": MatraxWrapper} -_jaxmarl_registry: Dict[str, Type[JaxMarlWrapper]] = {"Smax": SmaxWrapper, "MaBrax": MabraxWrapper} +_jaxmarl_registry = {"Smax": SmaxWrapper, "MaBrax": MabraxWrapper} _gigastep_registry = {"Gigastep": GigastepWrapper} _gym_registry = { @@ -99,9 +100,7 @@ def add_extra_wrappers( return train_env, eval_env -def make_jumanji_env( - env_name: str, config: DictConfig, add_global_state: bool = False -) -> Tuple[MarlEnv, MarlEnv]: +def make_jumanji_env(config: DictConfig, add_global_state: bool = False) -> Tuple[MarlEnv, MarlEnv]: """ Create a Jumanji environments for training and evaluation. @@ -117,14 +116,14 @@ def make_jumanji_env( """ # Config generator and select the wrapper. 
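This refactor moves the environment factories from taking an `env_name` argument to reading everything from the config: the registry is keyed by `config.env.env_name`, while the concrete scenario string passed to the underlying library lives in `config.env.scenario.name`. A small illustrative sketch of that split (the registry contents here are hypothetical):

```python
from omegaconf import OmegaConf

# Hypothetical registry entry; the real registries map names to generator/wrapper classes.
_registry = {"RobotWarehouse": {"wrapper": "RwareWrapper"}}

cfg = OmegaConf.create(
    {"env": {"env_name": "RobotWarehouse", "scenario": {"name": "RobotWarehouse-v0"}}}
)

entry = _registry[cfg.env.env_name]             # family name selects generator/wrapper
print(entry["wrapper"], cfg.env.scenario.name)  # scenario name is what the factory receives
```

The hunk below applies this split inside `make_jumanji_env`.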
- generator = _jumanji_registry[env_name]["generator"] + generator = _jumanji_registry[config.env.env_name]["generator"] generator = generator(**config.env.scenario.task_config) - wrapper = _jumanji_registry[env_name]["wrapper"] + wrapper = _jumanji_registry[config.env.env_name]["wrapper"] # Create envs. env_config = {**config.env.kwargs, **config.env.scenario.env_kwargs} - train_env = jumanji.make(env_name, generator=generator, **env_config) - eval_env = jumanji.make(env_name, generator=generator, **env_config) + train_env = jumanji.make(config.env.scenario.name, generator=generator, **env_config) + eval_env = jumanji.make(config.env.scenario.name, generator=generator, **env_config) train_env = wrapper(train_env, add_global_state=add_global_state) eval_env = wrapper(eval_env, add_global_state=add_global_state) @@ -132,9 +131,7 @@ def make_jumanji_env( return train_env, eval_env -def make_jaxmarl_env( - env_name: str, config: DictConfig, add_global_state: bool = False -) -> Tuple[MarlEnv, MarlEnv]: +def make_jaxmarl_env(config: DictConfig, add_global_state: bool = False) -> Tuple[MarlEnv, MarlEnv]: """ Create a JAXMARL environment. @@ -150,16 +147,16 @@ def make_jaxmarl_env( """ kwargs = dict(config.env.kwargs) - if "smax" in env_name.lower(): + if "smax" in config.env.env_name.lower(): kwargs["scenario"] = map_name_to_scenario(config.env.scenario.task_name) # Create jaxmarl envs. - train_env = _jaxmarl_registry[config.env.env_name]( - jaxmarl.make(env_name, **kwargs), + train_env: MarlEnv = _jaxmarl_registry[config.env.env_name]( + jaxmarl.make(config.env.scenario.name, **kwargs), add_global_state, ) - eval_env = _jaxmarl_registry[config.env.env_name]( - jaxmarl.make(env_name, **kwargs), + eval_env: MarlEnv = _jaxmarl_registry[config.env.env_name]( + jaxmarl.make(config.env.scenario.name, **kwargs), add_global_state, ) @@ -168,9 +165,7 @@ def make_jaxmarl_env( return train_env, eval_env -def make_matrax_env( - env_name: str, config: DictConfig, add_global_state: bool = False -) -> Tuple[MarlEnv, MarlEnv]: +def make_matrax_env(config: DictConfig, add_global_state: bool = False) -> Tuple[MarlEnv, MarlEnv]: """ Creates Matrax environments for training and evaluation. @@ -186,7 +181,7 @@ def make_matrax_env( """ # Select the Matrax wrapper. - wrapper = _matrax_registry[env_name] + wrapper = _matrax_registry[config.env.scenario.name] # Create envs. task_name = config["env"]["scenario"]["task_name"] @@ -200,7 +195,7 @@ def make_matrax_env( def make_gigastep_env( - env_name: str, config: DictConfig, add_global_state: bool = False + config: DictConfig, add_global_state: bool = False ) -> Tuple[MarlEnv, MarlEnv]: """ Create a Gigastep environment. @@ -216,13 +211,13 @@ def make_gigastep_env( A tuple of the environments. """ - wrapper = _gigastep_registry[env_name] + wrapper = _gigastep_registry[config.env.scenario.name] kwargs = config.env.kwargs scenario = ScenarioBuilder.from_config(config.env.scenario.task_config) - train_env = wrapper(scenario.make(**kwargs), has_global_state=add_global_state) - eval_env = wrapper(scenario.make(**kwargs), has_global_state=add_global_state) + train_env: MarlEnv = wrapper(scenario.make(**kwargs), has_global_state=add_global_state) + eval_env: MarlEnv = wrapper(scenario.make(**kwargs), has_global_state=add_global_state) train_env, eval_env = add_extra_wrappers(train_env, eval_env, config) return train_env, eval_env @@ -280,15 +275,15 @@ def make(config: DictConfig, add_global_state: bool = False) -> Tuple[MarlEnv, M A tuple of the environments. 
""" - env_name = config.env.scenario.name + env_name = config.env.env_name if env_name in _jumanji_registry: - return make_jumanji_env(env_name, config, add_global_state) - elif env_name in jaxmarl.registered_envs: - return make_jaxmarl_env(env_name, config, add_global_state) + return make_jumanji_env(config, add_global_state) + elif env_name in _jaxmarl_registry: + return make_jaxmarl_env(config, add_global_state) elif env_name in _matrax_registry: - return make_matrax_env(env_name, config, add_global_state) + return make_matrax_env(config, add_global_state) elif env_name in _gigastep_registry: - return make_gigastep_env(env_name, config, add_global_state) + return make_gigastep_env(config, add_global_state) else: raise ValueError(f"{env_name} is not a supported environment.") diff --git a/mava/utils/network_utils.py b/mava/utils/network_utils.py new file mode 100644 index 000000000..a2949bdd3 --- /dev/null +++ b/mava/utils/network_utils.py @@ -0,0 +1,30 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Tuple + +from jumanji.specs import DiscreteArray, MultiDiscreteArray + +from mava.types import MarlEnv + +_DISCRETE = "discrete" +_CONTINUOUS = "continuous" + + +def get_action_head(env: MarlEnv) -> Tuple[Dict[str, str], str]: + """Returns the appropriate action head config based on the environment action_spec.""" + if isinstance(env.action_spec(), (DiscreteArray, MultiDiscreteArray)): + return {"_target_": "mava.networks.heads.DiscreteActionHead"}, _DISCRETE + + return {"_target_": "mava.networks.heads.ContinuousActionHead"}, _CONTINUOUS diff --git a/mava/wrappers/__init__.py b/mava/wrappers/__init__.py index a241c9658..31dc81672 100644 --- a/mava/wrappers/__init__.py +++ b/mava/wrappers/__init__.py @@ -30,6 +30,7 @@ ConnectorWrapper, LbfWrapper, RwareWrapper, + VectorConnectorWrapper, ) from mava.wrappers.matrax import MatraxWrapper from mava.wrappers.observation import AgentIDWrapper diff --git a/mava/wrappers/gigastep.py b/mava/wrappers/gigastep.py index ba4ab9206..f395e0536 100644 --- a/mava/wrappers/gigastep.py +++ b/mava/wrappers/gigastep.py @@ -201,7 +201,7 @@ def observation_spec(self) -> specs.Spec: if self.has_global_state: global_state = specs.BoundedArray( (self.num_agents, self._env.observation_space.shape[0] * self._env.n_agents), - jnp.int32, + jnp.float32, 0, 255, "global_state", @@ -298,3 +298,7 @@ def adversary_policy(self, obs: Array, state: Tuple[Dict, Dict], key: PRNGKey) - """ return jax.random.randint(key, (obs.shape[0],), 0, self.action_dim) + + @property + def unwrapped(self) -> GigastepEnv: + return self._env diff --git a/mava/wrappers/jaxmarl.py b/mava/wrappers/jaxmarl.py index f6ad51558..2540f9de3 100644 --- a/mava/wrappers/jaxmarl.py +++ b/mava/wrappers/jaxmarl.py @@ -299,6 +299,10 @@ def discount_spec(self) -> specs.BoundedArray: name="discount", ) + @property + def unwrapped(self) -> MultiAgentEnv: + return self._env + @abstractmethod def action_mask(self, wrapped_env_state: Any) 
-> Array: """Get action mask for each agent.""" diff --git a/mava/wrappers/jumanji.py b/mava/wrappers/jumanji.py index 1393566c1..5716d5557 100644 --- a/mava/wrappers/jumanji.py +++ b/mava/wrappers/jumanji.py @@ -18,6 +18,7 @@ from typing import Tuple, Union import chex +import jax import jax.numpy as jnp from jumanji import specs from jumanji.env import Environment @@ -150,7 +151,7 @@ def observation_spec( inner_spec = super().observation_spec() spec = inner_spec.replace(agents_view=inner_spec.agents_view.replace(dtype=float)) if self.add_global_state: - spec = inner_spec.replace(global_state=inner_spec.global_state.replace(dtype=float)) + spec = spec.replace(global_state=inner_spec.global_state.replace(dtype=float)) return spec @@ -210,7 +211,7 @@ def observation_spec( inner_spec = super().observation_spec() spec = inner_spec.replace(agents_view=inner_spec.agents_view.replace(dtype=float)) if self.add_global_state: - spec = inner_spec.replace(global_state=inner_spec.global_state.replace(dtype=float)) + spec = spec.replace(global_state=inner_spec.global_state.replace(dtype=float)) return spec @@ -311,6 +312,146 @@ def observation_spec( return specs.Spec(Observation, "ObservationSpec", **obs_data) +def _slice_around(pos: chex.Array, fov: int) -> Tuple[chex.Array, chex.Array]: + """Return the start and length of a slice that when used to index a grid will + return a 2*fov+1 x 2*fov+1 sub-grid centered around pos. + + Returns are meant to be used with a `jax.lax.dynamic_slice` + """ + # Because we pad the grid by fov we need to shift the pos to the position + # it will be in the padded grid. + shifted_pos = pos + fov + + start_x = shifted_pos[0] - fov + start_y = shifted_pos[1] - fov + return start_x, start_y + + +# get location coordinates from 2D grid +def _get_location(grid: chex.Array) -> chex.Array: + row_len = grid.shape[-1] + index = jnp.argmax(grid) + return jnp.asarray((jnp.floor(index / row_len), jnp.remainder(index, row_len)), dtype=int) + + +class VectorConnectorWrapper(JumanjiMarlWrapper): + """Multi-agent wrapper for the MaConnector environment. + + This wrapper transforms the grid-based observation to a vector of features. This env should + have the AgentID wrapper applied to it since there is not longer a channel that can encode + AgentID information. + """ + + def __init__(self, env: MaConnector, add_global_state: bool = False): + super().__init__(env, add_global_state) + self._env: MaConnector + self.fov = 2 + + def modify_timestep(self, timestep: TimeStep) -> TimeStep[Observation]: + """Modify the timestep for the Connector environment.""" + + # TARGET = 3 = The number of different types of items on the grid. 
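`_slice_around` above works together with padding and `jax.lax.dynamic_slice` to cut a (2*fov + 1) x (2*fov + 1) window centred on an agent, even when the agent sits at the edge of the grid. A standalone sketch of the idea, with an illustrative grid and padding value:

```python
import jax
import jax.numpy as jnp

fov = 2
grid = jnp.arange(36).reshape(6, 6)
pos = jnp.array([0, 5])  # agent in a corner, so the padding matters

padded = jnp.pad(grid, fov, constant_values=-1)
# Once the grid is padded by `fov`, the start returned by `_slice_around(pos, fov)`
# is simply `pos` itself in the padded frame.
window = jax.lax.dynamic_slice(padded, (pos[0], pos[1]), (2 * fov + 1, 2 * fov + 1))
print(window.shape)      # (5, 5)
print(window[fov, fov])  # the agent's own cell, grid[0, 5] == 5
```

`create_agents_view` below applies this per agent via `jax.vmap`.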
+ def create_agents_view(grid: chex.Array) -> chex.Array: + positions = jnp.where(grid % TARGET == POSITION, True, False) + targets = jnp.where((grid % TARGET == 0) & (grid != EMPTY), True, False) + paths = jnp.where(grid % TARGET == PATH, True, False) + + # group positions and paths + blockers = jnp.where(positions, 1, jnp.where(paths, -1, 0)) + + position_per_agent = grid == POSITION + target_per_agent = grid == TARGET + + # group agents own target and other targets + combined_targets = jnp.where(target_per_agent, 1, jnp.where(targets, -1, 0)) + + # get coordinates of each agent's location and target + position_coords = jax.vmap(_get_location)(position_per_agent) + target_coords = jax.vmap(_get_location)(target_per_agent) + + def _create_one_agent_view(i: int) -> chex.Array: + slice_len = 2 * self.fov + 1, 2 * self.fov + 1 + slice_x, slice_y = _slice_around(position_coords[i], self.fov) + padded_blockers = jnp.pad(blockers[i], self.fov, constant_values=True) + + blockers_around_agent = jax.lax.dynamic_slice( + padded_blockers, (slice_x, slice_y), slice_len + ) + blockers_around_agent = jnp.reshape(blockers_around_agent, -1).astype(float) + + my_pos = position_coords[i] / grid[0].size + my_target = target_coords[i] / grid[0].size + + padded_combined_targets = jnp.pad( + combined_targets[i], self.fov, constant_values=True + ) + + targets_around_agent = jax.lax.dynamic_slice( + padded_combined_targets, (slice_x, slice_y), slice_len + ) + targets_around_agent = jnp.reshape(targets_around_agent, -1).astype(float) + + return jnp.concatenate( + [my_pos, my_target, blockers_around_agent, targets_around_agent], + dtype=float, + ) + + return jax.vmap(_create_one_agent_view)(jnp.arange(self.num_agents)) + + obs_data = { + "agents_view": create_agents_view(timestep.observation.grid), + "action_mask": timestep.observation.action_mask, + "step_count": jnp.repeat(timestep.observation.step_count, self.num_agents), + } + + # The episode is won if all agents have connected. 
+ extras = timestep.extras | {"won_episode": timestep.extras["ratio_connections"] == 1.0} + + return timestep.replace(observation=Observation(**obs_data), extras=extras) + + def observation_spec( + self, + ) -> specs.Spec[Union[Observation, ObservationGlobalState]]: + """Specification of the observation of the environment.""" + step_count = specs.BoundedArray( + (self.num_agents,), + int, + jnp.zeros(self.num_agents, dtype=int), + jnp.repeat(self.time_limit, self.num_agents), + "step_count", + ) + + # 2 sets of tiles in fov (blockers and targets) + xy position of agent and target + tiles_in_fov = (self.fov * 2 + 1) ** 2 + single_agent_obs = 4 + tiles_in_fov * 2 + agents_view = specs.BoundedArray( + shape=(self.num_agents, single_agent_obs), + dtype=float, + name="agents_view", + minimum=-1.0, + maximum=1.0, + ) + + obs_data = { + "agents_view": agents_view, + "action_mask": self._env.observation_spec().action_mask, + "step_count": step_count, + } + + if self.add_global_state: + global_state = specs.BoundedArray( + shape=(self.num_agents, self.num_agents * single_agent_obs), + dtype=float, + name="global_state", + minimum=-1.0, + maximum=1.0, + ) + obs_data["global_state"] = global_state + return specs.Spec(ObservationGlobalState, "ObservationSpec", **obs_data) + + return specs.Spec(Observation, "ObservationSpec", **obs_data) + + class CleanerWrapper(JumanjiMarlWrapper): """Multi-agent wrapper for the Cleaner environment.""" diff --git a/pyproject.toml b/pyproject.toml index f4038941b..1ae507c56 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,42 +1,62 @@ -[tool.mypy] -python_version = 3.9 -namespace_packages = true -incremental = false -cache_dir = "" -warn_redundant_casts = true -warn_return_any = true -warn_unused_configs = true -warn_unused_ignores = false -allow_redefinition = true -disallow_untyped_calls = false -disallow_untyped_defs = true -disallow_incomplete_defs = true -check_untyped_defs = true -disallow_untyped_decorators = false -strict_optional = true -strict_equality = true -explicit_package_bases = true -follow_imports = "skip" -ignore_missing_imports = true +[build-system] +requires=["setuptools>=62.6"] +build-backend="setuptools.build_meta" + +[tool.setuptools.packages.find] +include=['mava*'] -[[tool.mypy.overrides]] -module = [ - "numpy.*", - "optax.*", - "neptune.*", - "hydra.*", - "omegaconf.*", +[project] +name="id-mava" +authors=[{name="InstaDeep Ltd"}] +dynamic=["version", "dependencies", "optional-dependencies"] +license={file="LICENSE"} +description="Distributed Multi-Agent Reinforcement Learning in JAX." 
+readme ="README.md" +requires-python=">=3.10" +keywords=["multi-agent", "reinforcement learning", "python", "jax", "anakin", "sebulba"] +classifiers=[ + "Environment :: Console", + "Intended Audience :: Science/Research", + "Intended Audience :: Developers", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries :: Python Modules", + "License :: OSI Approved :: Apache Software License", ] +[tool.setuptools.dynamic] +version={attr="mava.__version__"} +dependencies={file="requirements/requirements.txt"} +optional-dependencies={dev={file=["requirements/requirements-dev.txt"]}} + +[project.urls] +"Homepage"="https://github.com/instadeep/Mava" +"Bug Tracker"="https://github.com/instadeep/Mava/issues" + +[tool.mypy] +python_version="3.10" +warn_redundant_casts=true +disallow_untyped_defs=true +strict_equality=true +follow_imports="skip" +ignore_missing_imports=true [tool.ruff] -line-length = 100 +line-length=100 [tool.ruff.lint] -select = ["A", "B", "E", "F", "I", "N", "W", "RUF", "ANN"] -ignore = [ +select=["A", "B", "E", "F", "I", "N", "W", "RUF", "ANN"] +ignore=[ "E731", # Allow lambdas to be assigned to variables. "ANN101", # no need to type self "ANN102", # no need to type cls "ANN204", # no need for return type for special methods "ANN401", # can use Any type ] + +[tool.ruff.lint.pep8-naming] +ignore-names = ["?"] diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 13ff3a050..2e168bff8 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -2,7 +2,7 @@ brax==0.10.3 colorama distrax flashbax~=0.1.0 -flax +flax>=0.8.1 gigastep @ git+https://github.com/mlech26l/gigastep gymnasium hydra-core==1.3.2 @@ -12,7 +12,7 @@ jaxlib==0.4.30 jaxmarl jumanji @ git+https://github.com/sash-a/jumanji@old_jumanji # Includes a few extra MARL envs lbforaging -matrax @ git+https://github.com/instadeepai/matrax +matrax @ git+https://github.com/instadeepai/matrax@4c5d8aa97214848ea659274f16c48918c13e845b mujoco==3.1.3 mujoco-mjx==3.1.3 neptune diff --git a/setup.py b/setup.py deleted file mode 100644 index da7032c6c..000000000 --- a/setup.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2022 InstaDeep Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -from typing import List - -import setuptools -from setuptools import setup - - -def _parse_requirements(path: str) -> List[str]: - """Returns content of given requirements file.""" - with open(os.path.join(path)) as f: - return [line.rstrip() for line in f if not (line.isspace() or line.startswith("#"))] - - -def _get_version() -> str: - """Grabs the package version from mava/version.py.""" - dict_: dict = {} - with open("mava/version.py") as f: - exec(f.read(), dict_) - return dict_["__version__"] - - -setup( - name="id-mava", # could we just change this to mava? - version=_get_version(), - author="InstaDeep Ltd", - description="A Python library for Multi-Agent Reinforcement Learning in JAX.", - license="Apache 2.0", - url="https://github.com/instadeepai/mava/", - long_description=open("README.md").read(), - long_description_content_type="text/markdown", - keywords="multi-agent reinforcement-learning python jax", - packages=setuptools.find_packages(), - python_requires=">=3.9", - install_requires=_parse_requirements("requirements/requirements.txt"), - extras_require={ - "dev": _parse_requirements("requirements/requirements-dev.txt"), - }, - package_data={"mava": ["py.typed"]}, - classifiers=[ - "Development Status :: 3 - Alpha", - "Environment :: Console", - "Intended Audience :: Science/Research", - "Intended Audience :: Developers", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3.9", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Software Development :: Libraries :: Python Modules", - "License :: OSI Approved :: Apache Software License", - ], - zip_safe=False, - include_package_data=True, -) diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 000000000..21db9ec1c --- /dev/null +++ b/test/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
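With setup.py deleted above, the packaging metadata now lives entirely in pyproject.toml: the version is resolved dynamically from mava.__version__ and the dependency list from requirements/requirements.txt via [tool.setuptools.dynamic], so the old _parse_requirements and _get_version helpers are no longer needed. A minimal sanity-check sketch, not part of the patch itself, assuming the project has been installed (e.g. `pip install -e .`) under the declared distribution name "id-mava":

# Illustrative only: inspects the dynamic metadata declared in pyproject.toml.
from importlib.metadata import metadata, requires, version

dist = "id-mava"  # the [project] name declared above

print(version(dist))  # resolved from mava.__version__ at build time
print(metadata(dist)["Requires-Python"])  # expected to report ">=3.10"
for req in requires(dist) or []:  # populated from requirements/requirements.txt
    print(req)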
diff --git a/test/conftest.py b/test/conftest.py index 03c6f0710..cde9c1305 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -16,33 +16,42 @@ import pytest +from test.utils import ConfigValue + @pytest.fixture -def fast_config() -> Dict[str, Dict[str, bool | int | float]]: +def fast_config() -> Dict[str, ConfigValue]: return { - "system": { - # common - "num_updates": 2, - "rollout_length": 1, - "num_minibatches": 1, - "update_batch_size": 1, - # ppo: - "ppo_epochs": 1, - # sac: - "explore_steps": 1, - "epochs": 1, # also for iql - "policy_update_delay": 1, - "buffer_size": 8, # also for iql - "batch_size": 1, - # iql - "min_buffer_size": 4, - "sample_batch_size": 1, - "sample_sequence_length": 1, - }, - "arch": { - "num_envs": 1, - "num_eval_episodes": 1, - "num_evaluation": 1, - "absolute_metric": False, - }, + # ---------- system config --------- + # common + "num_updates": 2, + "rollout_length": 1, + "num_minibatches": 1, + "update_batch_size": 1, + # ppo + "ppo_epochs": 1, + # sac + "explore_steps": 1, + "epochs": 1, # also for iql + "policy_update_delay": 1, + "buffer_size": 8, # also for iql + "batch_size": 1, + # iql + "min_buffer_size": 4, + "sample_batch_size": 1, + "sample_sequence_length": 2, + # ---------- arch config ---------- + "num_envs": 1, + "num_eval_episodes": 1, + "num_evaluation": 1, + "absolute_metric": False, + # ---------- network config ---------- + "hidden_state_dim": 2, + "layer_sizes": [4], + "channel_sizes": [1, 1], + "use_layer_norm": False, + # -------- transformer specific -------- + "n_block": 1, + "n_head": 1, + "n_embd": 8, } diff --git a/test/integration_test.py b/test/integration_test.py index a9f79214a..419cf5799 100644 --- a/test/integration_test.py +++ b/test/integration_test.py @@ -19,6 +19,8 @@ from hydra import compose, initialize from omegaconf import DictConfig, OmegaConf +from test.utils import find_replace + # This integration test is not exhaustive, that would be too expensive. This means that not all # system run all envs, but each env and each system is run at least once. # For each system we select a random environment to run. 
@@ -31,10 +33,13 @@ "ppo.anakin.rec_ippo", "ppo.anakin.rec_mappo", ] -q_learning_systems = ["q_learning.anakin.rec_iql"] -sac_systems = ["sac.anakin.ff_isac", "sac.anakin.ff_masac"] -discrete_envs = ["gigastep", "lbf", "matrax", "rware", "smax"] +sac_systems = ["sac.anakin.ff_isac", "sac.anakin.ff_masac", "sac.anakin.ff_hasac"] +q_learning_systems = ["q_learning.anakin.rec_iql", "q_learning.anakin.rec_qmix"] +transformer_systems = ["mat.anakin.mat"] +sable_systems = ["sable.anakin.ff_sable", "sable.anakin.rec_sable"] + +discrete_envs = ["gigastep", "lbf", "matrax", "rware", "smax", "vector-connector"] cnn_envs = ["cleaner", "connector"] continuous_envs = ["mabrax"] @@ -53,14 +58,11 @@ def _run_system(system_name: str, cfg: DictConfig) -> float: return float(eval_perf) -def _get_fast_config(cfg: DictConfig, fast_config: dict) -> DictConfig: +def _get_fast_config(cfg: DictConfig, config_modifications: dict) -> DictConfig: """Makes the configs use a minimum number of timesteps and evaluations.""" - dconf: dict = OmegaConf.to_container(cfg, resolve=True) - dconf["system"] |= fast_config["system"] - dconf["arch"] |= fast_config["arch"] - cfg = OmegaConf.create(dconf) - - return cfg + return OmegaConf.create( + find_replace(OmegaConf.to_container(cfg, resolve=True), config_modifications) + ) @pytest.mark.parametrize("system_path", ppo_systems) @@ -76,6 +78,19 @@ def test_ppo_system(fast_config: dict, system_path: str) -> None: _run_system(system_path, cfg) +@pytest.mark.parametrize("system_path", sable_systems) +def test_sable_system(fast_config: dict, system_path: str) -> None: + """Test all sable systems on random envs.""" + _, _, system_name = system_path.split(".") + env = random.choice(discrete_envs) + + with initialize(version_base=None, config_path=config_path): + cfg = compose(config_name=f"{system_name}", overrides=[f"env={env}"]) + cfg = _get_fast_config(cfg, fast_config) + + _run_system(system_path, cfg) + + @pytest.mark.parametrize("system_path", q_learning_systems) def test_q_learning_system(fast_config: dict, system_path: str) -> None: """Test all Q-Learning systems on random envs.""" @@ -102,6 +117,19 @@ def test_sac_system(fast_config: dict, system_path: str) -> None: _run_system(system_path, cfg) +@pytest.mark.parametrize("system_path", transformer_systems) +def test_transformer_system(fast_config: dict, system_path: str) -> None: + """Test transformer systems on random envs.""" + _, _, system_name = system_path.split(".") + env = random.choice(continuous_envs + discrete_envs) + + with initialize(version_base=None, config_path=config_path): + cfg = compose(config_name=f"{system_name}", overrides=[f"env={env}"]) + cfg = _get_fast_config(cfg, fast_config) + + _run_system(system_path, cfg) + + @pytest.mark.parametrize("env_name", discrete_envs) def test_discrete_env(fast_config: dict, env_name: str) -> None: """Test all discrete envs on random systems.""" @@ -139,7 +167,7 @@ def test_continuous_env(fast_config: dict, env_name: str) -> None: system_path = random.choice(ppo_systems + sac_systems) _, _, system_name = system_path.split(".") - overrides = [f"env={env_name}", "network=continuous_mlp"] + overrides = [f"env={env_name}"] with initialize(version_base=None, config_path=config_path): cfg = compose(config_name=f"{system_name}", overrides=overrides) cfg = _get_fast_config(cfg, fast_config) diff --git a/test/utils.py b/test/utils.py new file mode 100644 index 000000000..82fdf9cc8 --- /dev/null +++ b/test/utils.py @@ -0,0 +1,39 @@ +# Copyright 2022 InstaDeep Ltd. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, List, TypeAlias + +_ConfSingleValue: TypeAlias = bool | int | float +ConfigValue: TypeAlias = _ConfSingleValue | List[_ConfSingleValue] | Dict[str, _ConfSingleValue] + + +def find_replace(d: Dict[str, Any], replacements: Dict[str, ConfigValue]) -> Dict[str, ConfigValue]: + """Recursively searches through a dictionary and replaces values for specified keys. + + Args: + d: Dictionary to search through + replacements: The keys and values to replace + """ + + def _find_replace_recursive(current_dict: Dict[str, ConfigValue]) -> Dict[str, ConfigValue]: + """Helper function that recursively searches and replaces values.""" + for k, v in current_dict.items(): + if isinstance(v, dict): + current_dict[k] = _find_replace_recursive(v) + elif k in replacements: + current_dict[k] = replacements[k] + + return current_dict + + return _find_replace_recursive(d) From a723392b23e02dc015e9a43a5fd47008e6bf6f1c Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 13 Nov 2024 09:38:27 +0100 Subject: [PATCH 135/139] fix: sebulba compatible get_action_head --- mava/evaluator.py | 3 ++- mava/systems/ppo/anakin/ff_ippo.py | 2 +- mava/systems/ppo/anakin/ff_mappo.py | 2 +- mava/systems/ppo/anakin/rec_ippo.py | 2 +- mava/systems/ppo/anakin/rec_mappo.py | 2 +- mava/systems/ppo/sebulba/ff_ippo.py | 7 ++++--- mava/utils/make_env.py | 2 +- mava/utils/network_utils.py | 12 ++++++------ 8 files changed, 17 insertions(+), 15 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index 21037c2c3..e1b35b7d9 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -37,6 +37,7 @@ RecActorApply, State, ) +from mava.wrappers.gym import GymToJumanji # Optional extras that are passed out of the actor and then into the actor in the next step ActorState: TypeAlias = Dict[str, Any] @@ -211,7 +212,7 @@ def eval_act_fn( def get_sebulba_eval_fn( - env_maker: Callable, + env_maker: Callable[[int, int], GymToJumanji], act_fn: EvalActFn, config: DictConfig, np_rng: np.random.Generator, diff --git a/mava/systems/ppo/anakin/ff_ippo.py b/mava/systems/ppo/anakin/ff_ippo.py index 698c505b2..201bd5fc0 100644 --- a/mava/systems/ppo/anakin/ff_ippo.py +++ b/mava/systems/ppo/anakin/ff_ippo.py @@ -362,7 +362,7 @@ def learner_setup( # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) diff --git a/mava/systems/ppo/anakin/ff_mappo.py b/mava/systems/ppo/anakin/ff_mappo.py index 3103cc164..680e6361a 100644 --- a/mava/systems/ppo/anakin/ff_mappo.py +++ b/mava/systems/ppo/anakin/ff_mappo.py @@ -346,7 +346,7 @@ def learner_setup( # Define network and optimiser.
actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) diff --git a/mava/systems/ppo/anakin/rec_ippo.py b/mava/systems/ppo/anakin/rec_ippo.py index b936262ff..182382ac6 100644 --- a/mava/systems/ppo/anakin/rec_ippo.py +++ b/mava/systems/ppo/anakin/rec_ippo.py @@ -457,7 +457,7 @@ def learner_setup( # Define network and optimisers. actor_pre_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) actor_post_torso = hydra.utils.instantiate(config.network.actor_network.post_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_pre_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) critic_post_torso = hydra.utils.instantiate(config.network.critic_network.post_torso) diff --git a/mava/systems/ppo/anakin/rec_mappo.py b/mava/systems/ppo/anakin/rec_mappo.py index f1105fe73..671d5cbc5 100644 --- a/mava/systems/ppo/anakin/rec_mappo.py +++ b/mava/systems/ppo/anakin/rec_mappo.py @@ -452,7 +452,7 @@ def learner_setup( # Define network and optimiser. actor_pre_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) actor_post_torso = hydra.utils.instantiate(config.network.actor_network.post_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_pre_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) critic_post_torso = hydra.utils.instantiate(config.network.critic_network.post_torso) diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 468957c46..76d133985 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -54,6 +54,7 @@ from mava.utils.config import check_sebulba_config, check_total_timesteps from mava.utils.jax_utils import merge_leading_dims, switch_leading_axes from mava.utils.logger import LogEvent, MavaLogger +from mava.utils.network_utils import get_action_head from mava.utils.sebulba import ParamsSource, Pipeline, RecordTimeTo, ThreadLifetime from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -466,9 +467,9 @@ def learner_setup( # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - actor_action_head = hydra.utils.instantiate( - config.network.action_head, action_dim=config.system.num_actions - ) + action_head, _ = get_action_head(action_space) + actor_action_head = hydra.utils.instantiate(action_head, action_dim=config.system.num_actions) + critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) actor_network = Actor(torso=actor_torso, action_head=actor_action_head) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 8794093ac..e0360c706 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -55,8 +55,8 @@ SmacWrapper, SmaxWrapper, UoeWrapper, - async_multiagent_worker, VectorConnectorWrapper, + async_multiagent_worker, ) # Registry mapping environment names to their generator and wrapper classes. 
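Patch 135 passes the action spec itself into get_action_head rather than the whole environment, so the same helper serves the Anakin systems (which hand it a Jumanji spec via env.action_spec()) and the Sebulba ff_ippo (which hands it a Gymnasium space); the network_utils.py hunk that follows widens the accepted types accordingly. A short usage sketch, illustrative only and not part of the patch:

# Both call styles resolve to the discrete head for discrete action specs/spaces.
from gymnasium.spaces import Discrete
from jumanji.specs import DiscreteArray

from mava.utils.network_utils import get_action_head

head_cfg, head_kind = get_action_head(DiscreteArray(num_values=5))  # Anakin style: env.action_spec()
assert head_kind == "discrete"

head_cfg, head_kind = get_action_head(Discrete(5))  # Sebulba style: a Gymnasium action space
assert head_cfg["_target_"] == "mava.networks.heads.DiscreteActionHead"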
diff --git a/mava/utils/network_utils.py b/mava/utils/network_utils.py index a2949bdd3..03a7e439f 100644 --- a/mava/utils/network_utils.py +++ b/mava/utils/network_utils.py @@ -12,19 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Tuple +from typing import Dict, Tuple, Union -from jumanji.specs import DiscreteArray, MultiDiscreteArray +from jumanji.specs import DiscreteArray, MultiDiscreteArray, Spec +from gymnasium.spaces import Discrete, MultiDiscrete, Space -from mava.types import MarlEnv _DISCRETE = "discrete" _CONTINUOUS = "continuous" -def get_action_head(env: MarlEnv) -> Tuple[Dict[str, str], str]: +def get_action_head(action_types: Union[Spec, Space]) -> Tuple[Dict[str, str], str]: """Returns the appropriate action head config based on the environment action_spec.""" - if isinstance(env.action_spec(), (DiscreteArray, MultiDiscreteArray)): + if isinstance(action_types, (DiscreteArray, MultiDiscreteArray, Discrete, MultiDiscrete)): return {"_target_": "mava.networks.heads.DiscreteActionHead"}, _DISCRETE - return {"_target_": "mava.networks.heads.ContinuousActionHead"}, _CONTINUOUS + return {"_target_": "mava.networks.heads.ContinuousActionHead"}, _CONTINUOUS \ No newline at end of file From a75b2a2b7ac996ad6cf0987552a5e25c847e338d Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 13 Nov 2024 09:49:11 +0100 Subject: [PATCH 136/139] chore: pre-commits --- mava/evaluator.py | 12 ++++++------ mava/systems/ppo/sebulba/ff_ippo.py | 6 +++--- mava/utils/network_utils.py | 5 ++--- mava/wrappers/gym.py | 6 +++--- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/mava/evaluator.py b/mava/evaluator.py index e1b35b7d9..6b2fda203 100644 --- a/mava/evaluator.py +++ b/mava/evaluator.py @@ -270,7 +270,7 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: seeds = np_rng.integers(np.iinfo(np.int32).max, size=n_parallel_envs).tolist() ts = env.reset(seed=seeds) - timesteps = [ts] + timesteps_array = [ts] actor_state = init_act_state finished_eps = ts.last() @@ -280,11 +280,11 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: action, actor_state = act_fn(params, ts, act_key, actor_state) cpu_action = jax.device_get(action) ts = env.step(cpu_action) - timesteps.append(ts) + timesteps_array.append(ts) finished_eps = np.logical_or(finished_eps, ts.last()) - timesteps = jax.tree.map(lambda *x: np.stack(x), *timesteps) + timesteps = jax.tree.map(lambda *x: np.stack(x), *timesteps_array) metrics = timesteps.extras["episode_metrics"] if config.env.log_win_rate: @@ -301,13 +301,13 @@ def _episode(key: PRNGKey) -> Tuple[PRNGKey, Metrics]: # This loop is important because we don't want too many parallel envs. # So in evaluation we have num_envs parallel envs and loop enough times # so that we do at least `eval_episodes` number of episodes. 
- metrics = [] + metrics_array = [] for _ in range(episode_loops): key, metric = _episode(key) - metrics.append(metric) + metrics_array.append(metric) # flatten metrics - metrics: Metrics = jax.tree_map(lambda *x: np.array(x).reshape(-1), *metrics) + metrics: Metrics = jax.tree_map(lambda *x: np.array(x).reshape(-1), *metrics_array) return metrics def timed_eval_fn(params: FrozenDict, key: PRNGKey, init_act_state: ActorState) -> Metrics: diff --git a/mava/systems/ppo/sebulba/ff_ippo.py b/mava/systems/ppo/sebulba/ff_ippo.py index 76d133985..6f34c0b1a 100644 --- a/mava/systems/ppo/sebulba/ff_ippo.py +++ b/mava/systems/ppo/sebulba/ff_ippo.py @@ -404,7 +404,7 @@ def learner_thread( for _ in range(config.arch.num_evaluation): # Create the lists to store metrics and timings for this learning iteration. metrics: List[Tuple[Dict, Dict]] = [] - rollout_times: List[Dict] = [] + rollout_times_array: List[Dict] = [] learn_times: Dict[str, List[float]] = defaultdict(list) with RecordTimeTo(learn_times["learner_time_per_eval"]): @@ -423,7 +423,7 @@ def learner_thread( learner_state, ep_metrics, train_metrics = learn_fn(learner_state, traj_batch) metrics.append((ep_metrics, train_metrics)) - rollout_times.append(rollout_time) + rollout_times_array.append(rollout_time) # Update all the params sources so all actors can get the latest params params = jax.block_until_ready(learner_state.params) @@ -432,7 +432,7 @@ def learner_thread( # Pass all the metrics and params to the main thread (evaluator) for logging and evaluation ep_metrics, train_metrics = tree.map(lambda *x: np.asarray(x), *metrics) - rollout_times: Dict[str, NDArray] = tree.map(lambda *x: np.mean(x), *rollout_times) + rollout_times: Dict[str, NDArray] = tree.map(lambda *x: np.mean(x), *rollout_times_array) timing_dict = rollout_times | learn_times timing_dict = tree.map(np.mean, timing_dict, is_leaf=lambda x: isinstance(x, list)) diff --git a/mava/utils/network_utils.py b/mava/utils/network_utils.py index 03a7e439f..b16c46054 100644 --- a/mava/utils/network_utils.py +++ b/mava/utils/network_utils.py @@ -14,9 +14,8 @@ from typing import Dict, Tuple, Union -from jumanji.specs import DiscreteArray, MultiDiscreteArray, Spec from gymnasium.spaces import Discrete, MultiDiscrete, Space - +from jumanji.specs import DiscreteArray, MultiDiscreteArray, Spec _DISCRETE = "discrete" _CONTINUOUS = "continuous" @@ -27,4 +26,4 @@ def get_action_head(action_types: Union[Spec, Space]) -> Tuple[Dict[str, str], s if isinstance(action_types, (DiscreteArray, MultiDiscreteArray, Discrete, MultiDiscrete)): return {"_target_": "mava.networks.heads.DiscreteActionHead"}, _DISCRETE - return {"_target_": "mava.networks.heads.ContinuousActionHead"}, _CONTINUOUS \ No newline at end of file + return {"_target_": "mava.networks.heads.ContinuousActionHead"}, _CONTINUOUS diff --git a/mava/wrappers/gym.py b/mava/wrappers/gym.py index 594fdc7eb..9258bde6a 100644 --- a/mava/wrappers/gym.py +++ b/mava/wrappers/gym.py @@ -57,13 +57,13 @@ class TimeStep: observation: Union[Observation, ObservationGlobalState] extras: Dict = field(default_factory=dict) - def first(self) -> bool: + def first(self) -> NDArray: return self.step_type == StepType.FIRST - def mid(self) -> bool: + def mid(self) -> NDArray: return self.step_type == StepType.MID - def last(self) -> bool: + def last(self) -> NDArray: return self.step_type == StepType.LAST From 3fce221acee92a2172e0cf003f2f500616e93f5e Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 13 Nov 2024 11:36:18 +0100 Subject: [PATCH 137/139] 
fix: action_head parameters for all systems --- mava/advanced_usage/ff_ippo_store_experience.py | 2 +- mava/systems/mat/anakin/mat.py | 4 ++-- mava/systems/sable/anakin/ff_sable.py | 4 ++-- mava/systems/sable/anakin/rec_sable.py | 4 ++-- mava/systems/sac/anakin/ff_hasac.py | 4 ++-- mava/systems/sac/anakin/ff_isac.py | 2 +- mava/systems/sac/anakin/ff_masac.py | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/mava/advanced_usage/ff_ippo_store_experience.py b/mava/advanced_usage/ff_ippo_store_experience.py index 9546ddbb3..da657e9b6 100644 --- a/mava/advanced_usage/ff_ippo_store_experience.py +++ b/mava/advanced_usage/ff_ippo_store_experience.py @@ -361,7 +361,7 @@ def learner_setup( # Define network and optimiser. actor_torso = hydra.utils.instantiate(config.network.actor_network.pre_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate(action_head, action_dim=env.action_dim) critic_torso = hydra.utils.instantiate(config.network.critic_network.pre_torso) diff --git a/mava/systems/mat/anakin/mat.py b/mava/systems/mat/anakin/mat.py index 944ab77d1..c7d62ac54 100644 --- a/mava/systems/mat/anakin/mat.py +++ b/mava/systems/mat/anakin/mat.py @@ -41,6 +41,7 @@ ) from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import ( merge_leading_dims, unreplicate_batch_dim, @@ -48,7 +49,6 @@ ) from mava.utils.logger import LogEvent, MavaLogger from mava.utils.network_utils import get_action_head -from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -355,7 +355,7 @@ def learner_setup( init_x = env.observation_spec().generate_value() init_x = tree.map(lambda x: x[None, ...], init_x) - _, action_space_type = get_action_head(env) + _, action_space_type = get_action_head(env.action_spec()) if action_space_type == "discrete": init_action = jnp.zeros((1, config.system.num_agents), dtype=jnp.int32) diff --git a/mava/systems/sable/anakin/ff_sable.py b/mava/systems/sable/anakin/ff_sable.py index bcd7dd3e0..2e7b6812f 100644 --- a/mava/systems/sable/anakin/ff_sable.py +++ b/mava/systems/sable/anakin/ff_sable.py @@ -43,10 +43,10 @@ from mava.types import Action, ExperimentOutput, LearnerFn, MarlEnv from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import merge_leading_dims, unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger from mava.utils.network_utils import get_action_head -from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -399,7 +399,7 @@ def learner_setup( # Set positional encoding to False, since ff-sable does not use temporal dependencies. config.network.memory_config.timestep_positional_encoding = False - _, action_space_type = get_action_head(env) + _, action_space_type = get_action_head(env.action_spec()) # Define network. 
sable_network = SableNetwork( diff --git a/mava/systems/sable/anakin/rec_sable.py b/mava/systems/sable/anakin/rec_sable.py index 5f1a4c16e..50eba885f 100644 --- a/mava/systems/sable/anakin/rec_sable.py +++ b/mava/systems/sable/anakin/rec_sable.py @@ -44,10 +44,10 @@ from mava.types import Action, ExperimentOutput, LearnerFn, MarlEnv from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import concat_time_and_agents, unreplicate_batch_dim, unreplicate_n_dims from mava.utils.logger import LogEvent, MavaLogger from mava.utils.network_utils import get_action_head -from mava.utils.total_timestep_checker import check_total_timesteps from mava.utils.training import make_learning_rate from mava.wrappers.episode_metrics import get_final_step_metrics @@ -425,7 +425,7 @@ def learner_setup( else: config.network.memory_config.chunk_size = config.system.rollout_length * n_agents - _, action_space_type = get_action_head(env) + _, action_space_type = get_action_head(env.action_spec()) # Define network. sable_network = SableNetwork( diff --git a/mava/systems/sac/anakin/ff_hasac.py b/mava/systems/sac/anakin/ff_hasac.py index 0ea26ba9e..043db91d9 100644 --- a/mava/systems/sac/anakin/ff_hasac.py +++ b/mava/systems/sac/anakin/ff_hasac.py @@ -52,6 +52,7 @@ from mava.utils import make_env as environments from mava.utils.centralised_training import get_joint_action from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import ( tree_at_set, tree_slice, @@ -60,7 +61,6 @@ ) from mava.utils.logger import LogEvent, MavaLogger from mava.utils.network_utils import get_action_head -from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics # General shape comment guideline: @@ -153,7 +153,7 @@ def replicate(x: Any) -> Any: # Making actor network actor_torso = hydra.utils.instantiate(cfg.network.actor_network.pre_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate( action_head, action_dim=env.action_dim, independent_std=False ) diff --git a/mava/systems/sac/anakin/ff_isac.py b/mava/systems/sac/anakin/ff_isac.py index e908a63b6..12416d542 100644 --- a/mava/systems/sac/anakin/ff_isac.py +++ b/mava/systems/sac/anakin/ff_isac.py @@ -111,7 +111,7 @@ def replicate(x: Any) -> Any: # Making actor network actor_torso = hydra.utils.instantiate(cfg.network.actor_network.pre_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate( action_head, action_dim=env.action_dim, independent_std=False ) diff --git a/mava/systems/sac/anakin/ff_masac.py b/mava/systems/sac/anakin/ff_masac.py index 425f98dee..693364d68 100644 --- a/mava/systems/sac/anakin/ff_masac.py +++ b/mava/systems/sac/anakin/ff_masac.py @@ -114,7 +114,7 @@ def replicate(x: Any) -> Any: # Making actor network actor_torso = hydra.utils.instantiate(cfg.network.actor_network.pre_torso) - action_head, _ = get_action_head(env) + action_head, _ = get_action_head(env.action_spec()) actor_action_head = hydra.utils.instantiate( action_head, action_dim=env.action_dim, independent_std=False ) From acf1830505fe47c801cb34a661c062925922df8f Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 13 Nov 2024 11:56:47 +0100 Subject: [PATCH 138/139] chore: pre-commits 
--- mava/utils/make_env.py | 1 - mava/utils/network_utils.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/utils/make_env.py b/mava/utils/make_env.py index 40c38e94e..e0360c706 100644 --- a/mava/utils/make_env.py +++ b/mava/utils/make_env.py @@ -57,7 +57,6 @@ UoeWrapper, VectorConnectorWrapper, async_multiagent_worker, - VectorConnectorWrapper, ) # Registry mapping environment names to their generator and wrapper classes. diff --git a/mava/utils/network_utils.py b/mava/utils/network_utils.py index a6483e74f..b16c46054 100644 --- a/mava/utils/network_utils.py +++ b/mava/utils/network_utils.py @@ -20,6 +20,7 @@ _DISCRETE = "discrete" _CONTINUOUS = "continuous" + def get_action_head(action_types: Union[Spec, Space]) -> Tuple[Dict[str, str], str]: """Returns the appropriate action head config based on the environment action_spec.""" if isinstance(action_types, (DiscreteArray, MultiDiscreteArray, Discrete, MultiDiscrete)): From 7da596853ce40c8173d759270752d530f7f4d2ad Mon Sep 17 00:00:00 2001 From: Louay Ben Nessir Date: Wed, 13 Nov 2024 13:05:34 +0100 Subject: [PATCH 139/139] fix: rec_qmix import --- mava/systems/q_learning/anakin/rec_qmix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mava/systems/q_learning/anakin/rec_qmix.py b/mava/systems/q_learning/anakin/rec_qmix.py index 2b485bd09..7dcccf75c 100644 --- a/mava/systems/q_learning/anakin/rec_qmix.py +++ b/mava/systems/q_learning/anakin/rec_qmix.py @@ -47,13 +47,13 @@ from mava.types import MarlEnv, Observation from mava.utils import make_env as environments from mava.utils.checkpointing import Checkpointer +from mava.utils.config import check_total_timesteps from mava.utils.jax_utils import ( switch_leading_axes, unreplicate_batch_dim, unreplicate_n_dims, ) from mava.utils.logger import LogEvent, MavaLogger -from mava.utils.total_timestep_checker import check_total_timesteps from mava.wrappers import episode_metrics
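The flattened fast_config fixture introduced earlier in this series is applied by running it through the find_replace helper from test/utils.py, exactly as _get_fast_config does in the integration tests. A small usage sketch with made-up config values, illustrative only:

# find_replace recursively overwrites matching keys wherever they appear in the nested config.
from omegaconf import OmegaConf

from test.utils import find_replace

cfg = OmegaConf.create({"system": {"num_updates": 100, "rollout_length": 128}, "arch": {"num_envs": 16}})
overrides = {"num_updates": 2, "rollout_length": 1, "num_envs": 1}

fast_cfg = OmegaConf.create(find_replace(OmegaConf.to_container(cfg, resolve=True), overrides))
assert fast_cfg.system.num_updates == 2 and fast_cfg.arch.num_envs == 1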