From 6081796bc00e8db4e17a337ae337e66d366444e4 Mon Sep 17 00:00:00 2001
From: Driss Guessous <32754868+drisspg@users.noreply.github.com>
Date: Wed, 1 May 2024 12:29:46 -0700
Subject: [PATCH 01/61] perform in chunks (#196)

---
 test/dtypes/test_nf4.py     | 14 +++++++++
 torchao/dtypes/nf4tensor.py | 58 +++++++++++++------------------------
 2 files changed, 34 insertions(+), 38 deletions(-)

diff --git a/test/dtypes/test_nf4.py b/test/dtypes/test_nf4.py
index 55bbe0bcb9..2789315169 100644
--- a/test/dtypes/test_nf4.py
+++ b/test/dtypes/test_nf4.py
@@ -236,6 +236,20 @@ def test_smoketest_linear_compile(self, dtype: torch.dtype):
         out3 = torch.compile(torch.nn.functional.linear, mode='max-autotune')(inp, a_nf4)

+    @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
+    @parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32])
+    @parametrize("shape", [(16, 16), (32, 16)])
+    @parametrize("chunk_size", [8, 16, 32])
+    def test_chunk_size_equivalence(self, dtype: torch.dtype, shape, chunk_size):
+        a = torch.randn(shape, device='cuda', dtype=dtype)
+        with unittest.mock.patch("torchao.dtypes.nf4tensor.CHUNK_SIZE", chunk_size):
+            nf4_patched = to_nf4(a, 16, 2)
+        # This is essentially no chunking, since the numel here is a lot smaller than the default CHUNK_SIZE
+        nf4_base = to_nf4(a, 16, 2)
+
+        torch.testing.assert_close(nf4_patched.quantized_data, nf4_base.quantized_data)
+
+
 instantiate_parametrized_tests(TestNF4Linear)

 if __name__ == "__main__":
diff --git a/torchao/dtypes/nf4tensor.py b/torchao/dtypes/nf4tensor.py
index f09d53821d..4628ce9949 100644
--- a/torchao/dtypes/nf4tensor.py
+++ b/torchao/dtypes/nf4tensor.py
@@ -1,10 +1,10 @@
 import functools
 from dataclasses import dataclass
+import math
 from typing import Dict, Tuple

 import torch
 import torch.nn.functional as F
-from torch import Tensor

 aten = torch.ops.aten

@@ -15,6 +15,14 @@
 NF4_OPS_TABLE: Dict[Any, Any] = {}

+# Note: Quantize in Chunks
+# During quantization to NF4, one of the steps is to convert the original float values
+# to the indices of the nearest values in the NF4 format. This can cause a large memory spike
+# due to the intermediates of the quantization process. Instead, we process the original
+# tensor in chunks. This is a tradeoff between memory and speed. 
This number seems to +# strike a good balance between memory and speed +CHUNK_SIZE = 1024**2 + def same_metadata(a: "NF4Tensor", b: "NF4Tensor"): both_nf4 = isinstance(a, NF4Tensor) and isinstance(b, NF4Tensor) @@ -375,7 +383,7 @@ def dequantize_scalers( @staticmethod def convert_to_norm_float_weight( - inpt_tensor: torch.Tensor, n_blocks: int, block_size: int, nf4: torch.tensor + inpt_tensor: torch.Tensor, n_blocks: int, block_size: int, nf4: torch.Tensor ) -> torch.Tensor: """Convert a tensor to the normalized float weight format""" flattened_tensor = inpt_tensor.flatten() @@ -393,9 +401,13 @@ def convert_to_norm_float_weight( scaled_blocks = blocks / scales # Returns a flattened tensor with each element quantized to nf4 index - quantized_blocks = NF4Tensor.quantize_tensor_nearest( - scaled_blocks.flatten(), nf4 - ) + # See Note: Quantize in Chunks + quantized_blocks = torch.empty(numel, dtype=torch.uint8, device=inpt_tensor.device) + flattened = scaled_blocks.flatten() + for chunk_num in range(math.ceil(numel / CHUNK_SIZE)): + start = chunk_num * CHUNK_SIZE + end = min(start + CHUNK_SIZE, numel) + quantized_blocks[start:end] = NF4Tensor.quantize_tensor_nearest(flattened[start:end], nf4).to(torch.uint8) # Combine the quantized elements into uint8 values # This lays out two consecutive elements in the same byte @@ -435,7 +447,7 @@ def get_original_weight(self) -> torch.Tensor: @staticmethod def quantize_tensor_nearest( - value: torch.float16, nf4: torch.Tensor + value: torch.Tensor, nf4: torch.Tensor ) -> torch.Tensor: """Quantize a float16 tensor to nf4 format to nearest and not rounded up""" value = value.unsqueeze(-1) # (numel, 1) @@ -445,36 +457,15 @@ def quantize_tensor_nearest( return closest_nf4 @staticmethod - - # inconsistently. - - # defined in `torch._C.TensorBase`. def dequantize(value: torch.Tensor, nf4: torch.Tensor) -> torch.Tensor: """Dequantize a nf4 value to bfloat16 format""" # return nf4.index_select(0, value) return nf4[value] - def unpack( - self, - ) -> Tuple[ - int, int, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Size - ]: - - # Size]` but got `Tuple[int, int, int, Tensor, Tensor, Tensor, Tensor]`. - return ( - self.block_size, - self.n_blocks, - self.scaler_block_size, - self.quantized_scalers, - self.quantization_factor, - self.scaler_mean, - self.quantized_data, - ) - - def __repr__(self): + def __repr__(self) -> str: return f"Quantized Data: {self.quantized_data}\nScalers: {self.quantized_scalers}\n" - def __str__(self): + def __str__(self) -> str: return f"NF4Tensor({self.shape}, {self.block_size})" def __tensor_flatten__(self): @@ -501,9 +492,6 @@ def __tensor_flatten__(self): ], ctx @staticmethod - - # `typing.Dict[, ]` to avoid runtime subscripting errors. - def __tensor_unflatten__(inner_tensors: Dict, metadata, outer_size, outer_stride): assert len(inner_tensors) == 5, "Expected 5 inner tensors" return NF4Tensor( @@ -567,18 +555,12 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): class LinearNF4(torch.autograd.Function): @staticmethod - - # inconsistently. - def forward(ctx, input: torch.Tensor, weight: NF4Tensor): """Save the quantized nf4 weight for backward pass""" ctx.nf4_weight = weight return F.linear(input, weight.to(input.dtype)) @staticmethod - - # inconsistently. 
- def backward(ctx, grad_output): """The nf4 weight will never require grad so we can just return the grad_output @ weight.to(grad_output.dtype)""" weight: NF4Tensor = ctx.nf4_weight From 6ae2c0bba7ef8fe40e4419d3f7b1954f553eaf6d Mon Sep 17 00:00:00 2001 From: Jesse Cai Date: Wed, 1 May 2024 14:36:15 -0700 Subject: [PATCH 02/61] Fix README links (#200) --- torchao/sparsity/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchao/sparsity/README.md b/torchao/sparsity/README.md index f7efe5b6a5..b18e996b58 100644 --- a/torchao/sparsity/README.md +++ b/torchao/sparsity/README.md @@ -44,7 +44,7 @@ The handoff point between these two pieces are sparse weights stored in a dense This also allows users with existing sparse weights in a dense format to take advantage of our fast sparse kernels. We anticipate many users to come up with their own custom frontend masking solution or to use another third party solution, as this is an active area of research. -![pruning_flow](https://private-user-images.githubusercontent.com/8041643/324612475-3873655f-3eab-40c7-8070-722b3eef4444.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTM5MjYwODAsIm5iZiI6MTcxMzkyNTc4MCwicGF0aCI6Ii84MDQxNjQzLzMyNDYxMjQ3NS0zODczNjU1Zi0zZWFiLTQwYzctODA3MC03MjJiM2VlZjQ0NDQucG5nP1gtQW16LUFsZ29yaXRobT1BV1M0LUhNQUMtU0hBMjU2JlgtQW16LUNyZWRlbnRpYWw9QUtJQVZDT0RZTFNBNTNQUUs0WkElMkYyMDI0MDQyNCUyRnVzLWVhc3QtMSUyRnMzJTJGYXdzNF9yZXF1ZXN0JlgtQW16LURhdGU9MjAyNDA0MjRUMDIyOTQwWiZYLUFtei1FeHBpcmVzPTMwMCZYLUFtei1TaWduYXR1cmU9N2ZjZTAwNzgyMjc4MGE3ZDZlYTQ3MDZkOTA3YTkwM2I3ODJiYjg4NzE2N2E3ZGJjZGVkZDhjYjJhMTgwOThhOSZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QmYWN0b3JfaWQ9MCZrZXlfaWQ9MCZyZXBvX2lkPTAifQ.SXj5_j7CC61CB6hanWrubY7k4Fq9Oko985qD7qaOAy4) +![pruning_flow](https://private-user-images.githubusercontent.com/8041643/324607153-ba91eaca-14ce-4608-9db8-6cbb9ea1f9ec.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTQ1OTgzOTYsIm5iZiI6MTcxNDU5ODA5NiwicGF0aCI6Ii84MDQxNjQzLzMyNDYwNzE1My1iYTkxZWFjYS0xNGNlLTQ2MDgtOWRiOC02Y2JiOWVhMWY5ZWMucG5nP1gtQW16LUFsZ29yaXRobT1BV1M0LUhNQUMtU0hBMjU2JlgtQW16LUNyZWRlbnRpYWw9QUtJQVZDT0RZTFNBNTNQUUs0WkElMkYyMDI0MDUwMSUyRnVzLWVhc3QtMSUyRnMzJTJGYXdzNF9yZXF1ZXN0JlgtQW16LURhdGU9MjAyNDA1MDFUMjExNDU2WiZYLUFtei1FeHBpcmVzPTMwMCZYLUFtei1TaWduYXR1cmU9YWVjOWQ5ZjFjMWZmNjg4ZTgyZGFkYWU3ZDQ3MDBjMTZkNzczZWQxYzczN2ZiM2ZjZGY0NjUwMGUwY2UwZDA1YyZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QmYWN0b3JfaWQ9MCZrZXlfaWQ9MCZyZXBvX2lkPTAifQ.ni5F_wDhNkeupMJ84bFNxhaSO3xPH-9zecz_933Uu68) Below, we provide an example of accelerating a model with 2:4 sparsity + bf16 using our PyTorch APIs. @@ -97,7 +97,7 @@ Note that this section focuses on **pruning**, instead of **sparse training**. 
T Roughly, the flow for achieving a more performant pruned model looks like this: -![flow](https://private-user-images.githubusercontent.com/8041643/324612485-c7008b1d-6c1a-4424-b3d1-34c55a25460d.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTM5MjYwODAsIm5iZiI6MTcxMzkyNTc4MCwicGF0aCI6Ii84MDQxNjQzLzMyNDYxMjQ4NS1jNzAwOGIxZC02YzFhLTQ0MjQtYjNkMS0zNGM1NWEyNTQ2MGQucG5nP1gtQW16LUFsZ29yaXRobT1BV1M0LUhNQUMtU0hBMjU2JlgtQW16LUNyZWRlbnRpYWw9QUtJQVZDT0RZTFNBNTNQUUs0WkElMkYyMDI0MDQyNCUyRnVzLWVhc3QtMSUyRnMzJTJGYXdzNF9yZXF1ZXN0JlgtQW16LURhdGU9MjAyNDA0MjRUMDIyOTQwWiZYLUFtei1FeHBpcmVzPTMwMCZYLUFtei1TaWduYXR1cmU9NWVlY2I3OTBlM2ViZTZiZmMwYmQzYjA3NjM1ZDY3NmZkZjNiMzk3M2JhMzkwOTYyZmM4Mjc5MWJkYTI2M2MxMiZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QmYWN0b3JfaWQ9MCZrZXlfaWQ9MCZyZXBvX2lkPTAifQ.neMkWGtDbGGw0Vn7MA1RJ_Q2iAvGIkcjRD-pLAtNd5k) +![flow](https://private-user-images.githubusercontent.com/8041643/324607146-53542488-65ce-4d99-a3ae-21e724f89467.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTQ1OTgzOTYsIm5iZiI6MTcxNDU5ODA5NiwicGF0aCI6Ii84MDQxNjQzLzMyNDYwNzE0Ni01MzU0MjQ4OC02NWNlLTRkOTktYTNhZS0yMWU3MjRmODk0NjcucG5nP1gtQW16LUFsZ29yaXRobT1BV1M0LUhNQUMtU0hBMjU2JlgtQW16LUNyZWRlbnRpYWw9QUtJQVZDT0RZTFNBNTNQUUs0WkElMkYyMDI0MDUwMSUyRnVzLWVhc3QtMSUyRnMzJTJGYXdzNF9yZXF1ZXN0JlgtQW16LURhdGU9MjAyNDA1MDFUMjExNDU2WiZYLUFtei1FeHBpcmVzPTMwMCZYLUFtei1TaWduYXR1cmU9ZWJlYWMzZDFmNzc2NDM1MGI2ODNlMjUxZjQxYTAwYzhhNzBkNGU2ZGIwYTg4NzA5Yjk3N2JkNzI4MmUyNzg3NiZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QmYWN0b3JfaWQ9MCZrZXlfaWQ9MCZyZXBvX2lkPTAifQ.Hxk5XMuJXhNsORVNNgcKNRCk7W1nT4CndLTAC3Oz0qE) The general idea behind pruning is that we can mask out some of the weights of a trained neural network and recover any accuracy loss. The resultant pruned model can be run on optimized kernels that take advantage of this sparsity for accelerated inference. 
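
The bf16 + 2:4 example the README refers to above is not included in this hunk. As a rough sketch of the flow it describes (not the README's exact code), pruning and accelerating a bf16 linear layer with the public torch.sparse semi-structured API could look like this; the sizes and the fixed mask pattern here are illustrative, and the 2:4 kernels need a CUDA device with SM80+:

    import torch
    from torch.sparse import to_sparse_semi_structured

    # a bf16 linear whose weight we will prune to the 2:4 pattern
    lin = torch.nn.Linear(2048, 2048, bias=False).cuda().to(torch.bfloat16)

    # simplest valid 2:4 pattern: keep 2 of every 4 consecutive weights
    mask = torch.tensor([1, 1, 0, 0], dtype=torch.bool, device="cuda").tile((2048, 512))
    pruned = lin.weight.detach().masked_fill(~mask, 0)

    # compress the dense 50%-zero weight into the semi-structured format;
    # subsequent F.linear calls dispatch to the fused 2:4 sparse matmul kernel
    lin.weight = torch.nn.Parameter(to_sparse_semi_structured(pruned))

    x = torch.randn(8, 2048, dtype=torch.bfloat16, device="cuda")
    y = lin(x)

This also makes the handoff point described above concrete: the masking step (frontend) only has to produce a dense weight with the 2:4 zero pattern, and the compression step (backend) turns it into something the fast kernels can consume.
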
From ac53d7fbab6dce6a390d95ca0696d64c205ef23c Mon Sep 17 00:00:00 2001 From: "Wei (Will) Feng" <134637289+weifengpy@users.noreply.github.com> Date: Wed, 1 May 2024 16:49:08 -0700 Subject: [PATCH 03/61] [FSDP2][NF4Tensor][2/n] implement torch.chunk and other ops (#150) --- test/dtypes/test_nf4.py | 189 +++++++++++++++++++- torchao/dtypes/nf4tensor.py | 334 +++++++++++++++++++++++++++++++++++- 2 files changed, 515 insertions(+), 8 deletions(-) diff --git a/test/dtypes/test_nf4.py b/test/dtypes/test_nf4.py index 2789315169..3e8b89f9f0 100644 --- a/test/dtypes/test_nf4.py +++ b/test/dtypes/test_nf4.py @@ -1,6 +1,7 @@ import logging import unittest from packaging import version +import math import torch from torch import nn @@ -10,11 +11,17 @@ parametrize, run_tests, ) -from torchao.dtypes.nf4tensor import linear_nf4, NF4Tensor, to_nf4 +from torchao.dtypes.nf4tensor import ( + linear_nf4, + NF4Tensor, + to_nf4, + _INNER_TENSOR_NAMES_FOR_SHARDING, +) import torch.nn.functional as F import io from collections import OrderedDict import torchao +from typing import Tuple, Union bnb_available = False @@ -234,8 +241,7 @@ def test_smoketest_linear_compile(self, dtype: torch.dtype): a_nf4 = torchao.dtypes.to_nf4(a, 16, 2) inp = torch.randn(2, 32, 32, dtype=a.dtype, device=a.device) out3 = torch.compile(torch.nn.functional.linear, mode='max-autotune')(inp, a_nf4) - - + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") @parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) @parametrize("shape", [(16, 16), (32, 16)]) @@ -250,7 +256,184 @@ def test_chunk_size_equivalence(self, dtype: torch.dtype, shape, chunk_size): torch.testing.assert_close(nf4_patched.quantized_data, nf4_base.quantized_data) + +class TestFSDPOps(TestCase): + @parametrize("input_size", [512 * 512, (512 * 512,), (512, 512)]) + def test_torch_chunk_valid(self, input_size: Union[Tuple[int], int]): + num_chunks = 2 + nf4_tensor = to_nf4(torch.randn(input_size)) + chunks = list(torch.chunk(nf4_tensor, num_chunks)) + self.assertEqual(len(chunks), num_chunks) + if isinstance(input_size, int): + expected_size0 = input_size // num_chunks + else: + expected_size0 = input_size[0] // num_chunks + for chunk in chunks: + self.assertEqual(chunk.size(0), expected_size0) + + @parametrize("input_size", [511 * 512, (511 * 512,), (511, 512)]) + def test_torch_chunk_invalid_divide(self, input_size: Union[Tuple[int], int]): + num_chunks = 2 + with self.assertRaisesRegex(AssertionError, "Number of scalers must be divisible by scaler block size"): + nf4_tensor = to_nf4(torch.randn(input_size)) + torch.chunk(nf4_tensor, num_chunks) + + @parametrize("input_size", [(512, 512, 512)]) + def test_torch_chunk_invalid_3d(self, input_size: Union[Tuple[int], int]): + num_chunks = 2 + with self.assertRaisesRegex(AssertionError, "expect input tensor dim <= 2"): + nf4_tensor = to_nf4(torch.randn(input_size)) + torch.chunk(nf4_tensor, num_chunks) + + @parametrize("input_size", [512 * 512, (512 * 512,), (512, 512)]) + def test_tensor_new_zeros_valid(self, input_size: Union[Tuple[int], int]): + nf4_tensor = to_nf4(torch.randn(input_size)) + nf4_tensor_zeros = nf4_tensor.new_zeros(input_size) + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + inner_tensor = getattr(nf4_tensor_zeros, attr) + self.assertEqual(torch.count_nonzero(inner_tensor), 0) + expected_size = input_size if not isinstance(input_size, int) else (input_size, ) + self.assertEqual(nf4_tensor_zeros.size(), torch.Size(expected_size)) + + @parametrize("input_size", [512 * 512, (512 
* 512,), (512, 512)]) + def test_tensor_new_zeros_invalid(self, input_size: Union[Tuple[int], int]): + if isinstance(input_size, int): + new_size = input_size + 1 + elif len(input_size) == 1: + new_size = (input_size[0] + 1, ) + else: + new_size = (input_size[0] + 1, input_size[1]) + nf4_tensor = to_nf4(torch.randn(input_size)) + with self.assertRaisesRegex(NotImplementedError, "aten.new_zeros\\(NF4Tensor\\) with new size"): + nf4_tensor_zeros = nf4_tensor.new_zeros(new_size) + + @parametrize("input_size", [512 * 512, (512 * 512,), (512, 512)]) + def test_tensor_slice_valid(self, input_size: Union[Tuple[int], int]): + nf4_tensor = to_nf4(torch.randn(input_size)) + orig_attrs, _ = nf4_tensor.__tensor_flatten__() + orig_sizes = dict([(attr, getattr(nf4_tensor, attr).size()) for attr in orig_attrs]) + end_idx = input_size if isinstance(input_size, int) else input_size[0] + sliced_tensor = nf4_tensor[:end_idx] + self.assertEqual(nf4_tensor.size(), sliced_tensor.size()) + attrs, _ = sliced_tensor.__tensor_flatten__() + for attr in attrs: + orig_storage = getattr(nf4_tensor, attr).untyped_storage().data_ptr() + sliced_tensor_inner = getattr(sliced_tensor, attr) + self.assertEqual(sliced_tensor_inner.untyped_storage().data_ptr(), orig_storage) + self.assertEqual(sliced_tensor_inner.size(), orig_sizes[attr]) + + def test_tensor_slice_1d_invalid(self): + nf4_tensor = to_nf4(torch.randn(512 * 512)) + with self.assertRaisesRegex(NotImplementedError, "aten.slice\\(NF4Tensor\\) with customized step"): + nf4_tensor[..., ::2] + with self.assertRaisesRegex(NotImplementedError, "aten.slice\\(NF4Tensor\\) with start"): + nf4_tensor[1:] + with self.assertRaisesRegex(NotImplementedError, "aten.slice\\(NF4Tensor\\) with end"): + nf4_tensor[:2] + + def test_tensor_slice_2d_invalid(self): + nf4_tensor = to_nf4(torch.randn((512, 512))) + with self.assertRaisesRegex(NotImplementedError, "aten.slice\\(NF4Tensor\\) with dim"): + nf4_tensor[:, :511] + with self.assertRaisesRegex(NotImplementedError, "aten.slice\\(NF4Tensor\\) with start"): + nf4_tensor[1:] + with self.assertRaisesRegex(NotImplementedError, "aten.slice\\(NF4Tensor\\) with end"): + nf4_tensor[:2] + + @parametrize("input_size", [(512 * 512,), (512, 512)]) + def test_tensor_view_valid(self, input_size: Union[Tuple[int], int]): + nf4_tensor = to_nf4(torch.randn(input_size)) + viewed_tensor = nf4_tensor.view(-1) + self.assertEqual(viewed_tensor.dim(), 1) + self.assertEqual(viewed_tensor.numel(), math.prod(input_size)) + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + inner_tensor = getattr(viewed_tensor, attr) + self.assertEqual(inner_tensor.size(0), inner_tensor.numel()) + + @parametrize("input_size", [(512 * 512,), (512, 512)]) + def test_tensor_view_invalid(self, input_size: Union[Tuple[int], int]): + nf4_tensor = to_nf4(torch.randn(input_size)) + if len(input_size) == 1: + with self.assertRaisesRegex(NotImplementedError, "aten.view\\(NF4Tensor\\) with size"): + nf4_tensor.view(input_size) + if len(input_size) == 2: + with self.assertRaisesRegex(NotImplementedError, "aten.view\\(NF4Tensor\\) with len\\(size\\)"): + nf4_tensor.view(input_size) + + @parametrize("input_size", [512 * 512, (512 * 512,), (512, 512)]) + def test_tensor_as_strided_valid(self, input_size: Union[Tuple[int], int]): + nf4_tensor = to_nf4(torch.randn(input_size)) + nf4_tensor_strided = torch.as_strided(nf4_tensor, nf4_tensor.size(), nf4_tensor.stride(), nf4_tensor.storage_offset()) + self.assertEqual(nf4_tensor_strided.size(), nf4_tensor.size()) + 
self.assertEqual(nf4_tensor_strided.stride(), nf4_tensor.stride()) + self.assertEqual(nf4_tensor_strided.storage_offset(), nf4_tensor.storage_offset()) + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + inner_tensor_orig = getattr(nf4_tensor, attr) + inner_tensor_strided = getattr(nf4_tensor_strided, attr) + self.assertEqual(inner_tensor_strided.size(), inner_tensor_orig.size()) + self.assertEqual(inner_tensor_strided.stride(), inner_tensor_orig.stride()) + self.assertEqual(inner_tensor_strided.storage_offset(), inner_tensor_orig.storage_offset()) + + + @parametrize("input_size", [(512 * 512,), (512, 512)]) + def test_tensor_as_strided_invalid(self, input_size: Union[Tuple[int], int]): + nf4_tensor = to_nf4(torch.randn(input_size)) + if len(input_size) == 1: + size = (input_size[0] - 1, ) + else: + size = (input_size[0] - 1, input_size[1]) + with self.assertRaisesRegex(NotImplementedError, "aten.as_strided\\(NF4Tensor\\) different numel"): + torch.as_strided(nf4_tensor, size, nf4_tensor.stride(), nf4_tensor.storage_offset()) + with self.assertRaisesRegex(NotImplementedError, "aten.as_strided\\(NF4Tensor\\) only support original storage offset"): + torch.as_strided(nf4_tensor, nf4_tensor.size(), nf4_tensor.stride(), 1) + + if len(input_size) == 2: + with self.assertRaisesRegex(NotImplementedError, "aten.as_strided\\(NF4Tensor\\) only support continuous stride"): + stride = (nf4_tensor.stride()[1], nf4_tensor.stride()[0]) + torch.as_strided(nf4_tensor, nf4_tensor.size(), stride, nf4_tensor.storage_offset()) + + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + def test_pin_memory(self): + nf4_tensor = to_nf4(torch.randn(512 * 512)) + self.assertFalse(nf4_tensor.is_pinned()) + + nf4_tensor = nf4_tensor.pin_memory() + self.assertTrue(nf4_tensor.is_pinned()) + + nf4_tensor = to_nf4(torch.randn(512 * 512, device='cuda')) + self.assertFalse(nf4_tensor.is_pinned()) + + + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + def test_to_cuda(self): + nf4_tensor = to_nf4(torch.randn(512 * 512)) + self.assertEqual(nf4_tensor.device.type, "cpu") + nf4_tensor = nf4_tensor.to("cuda", non_blocking=True) + self.assertEqual(nf4_tensor.device.type, "cuda") + + nf4_tensor = to_nf4(torch.randn(512 * 512)) + self.assertEqual(nf4_tensor.device.type, "cpu") + nf4_tensor = nf4_tensor.to("cuda") + self.assertEqual(nf4_tensor.device.type, "cuda") + + nf4_tensor = to_nf4(torch.randn(512 * 512)) + self.assertEqual(nf4_tensor.device.type, "cpu") + nf4_tensor = nf4_tensor.to("cuda", torch.bfloat16) + self.assertEqual(nf4_tensor.device.type, "cuda") + self.assertEqual(nf4_tensor.dtype, torch.bfloat16) + + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + def test_to_cpu(self): + nf4_tensor = to_nf4(torch.randn(512 * 512, device='cuda')) + nf4_tensor = nf4_tensor.cpu() + self.assertEqual(nf4_tensor.device.type, "cpu") + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + inner_tensor = getattr(nf4_tensor, attr) + self.assertEqual(inner_tensor.device.type, "cpu") + + instantiate_parametrized_tests(TestNF4Linear) +instantiate_parametrized_tests(TestFSDPOps) if __name__ == "__main__": run_tests() diff --git a/torchao/dtypes/nf4tensor.py b/torchao/dtypes/nf4tensor.py index 4628ce9949..48249434b7 100644 --- a/torchao/dtypes/nf4tensor.py +++ b/torchao/dtypes/nf4tensor.py @@ -2,19 +2,28 @@ from dataclasses import dataclass import math from typing import Dict, Tuple +import math +import sys +from enum import Enum, auto import torch import torch.nn.functional as F +from 
torch import Tensor +from torch.distributed.device_mesh import DeviceMesh +from torch._prims_common import make_contiguous_strides_for aten = torch.ops.aten c10d_functional = torch.ops.c10d_functional -from typing import Any, Tuple +from typing import Any, Optional, Tuple, Union, List NF4_OPS_TABLE: Dict[Any, Any] = {} + +_INNER_TENSOR_NAMES_FOR_SHARDING = ["quantized_scalers", "quantization_factor", "quantized_data"] + # Note: Quantize in Chunks # During quantization to NF4, one of the steps to convert from the original float number # to the index of the nearest value in the NF4 format. This can cause a large memory spike @@ -45,11 +54,219 @@ def decorator(func): return decorator -@implements([torch.ops.aten.detach.default, torch.ops.aten.detach]) +def construct_nf4_args(nf4tensor: "NF4Tensor", kwargs: Optional[Dict[str, Any]] = None): + if kwargs is None: + kwargs = {} + tensor_meta = SubclassTensorArgs( + kwargs.get("size", nf4tensor.size()), + kwargs.get("stride", nf4tensor.stride()), + kwargs.get("storage_offset", nf4tensor.storage_offset()), + kwargs.get("dtype", nf4tensor.dtype), + kwargs.get("device", nf4tensor.device), + kwargs.get("requires_grad", nf4tensor.requires_grad), + ) + return ( + tensor_meta, + kwargs.get("block_size", nf4tensor.block_size), + kwargs.get("n_blocks", nf4tensor.n_blocks), + kwargs.get("scaler_block_size", nf4tensor.scaler_block_size), + kwargs.get("quantized_scalers", nf4tensor.quantized_scalers), + kwargs.get("quantization_factor", nf4tensor.quantization_factor), + kwargs.get("scaler_mean", nf4tensor.scaler_mean), + kwargs.get("quantized_data", nf4tensor.quantized_data), + kwargs.get("nf4", nf4tensor.nf4), + ) + + +# __torch_dispatch__ utils: apply aten op to inner tensors +def apply_to_inner_tensors(nf4tensor: "NF4Tensor", aten_op, args, kwargs): + attr_to_tensor = {} + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + attr_to_tensor[attr] = aten_op(getattr(nf4tensor, attr), *args, **kwargs) + return attr_to_tensor + +# __torch_function__ utils: call tensor ops from inner tensors +def call_from_inner_tensors(nf4tensor: "NF4Tensor", method_name: str, args, kwargs): + attr_to_tensor = {} + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + inner_tensor = getattr(nf4tensor, attr) + func = getattr(inner_tensor, method_name) + attr_to_tensor[attr] = func(*args, **kwargs) + return attr_to_tensor + +class CompareOp(Enum): + EQ = auto() + LT = auto() + +def expect_num_of_args(op: CompareOp, num: int, msg: str): + def decorator(func): + @functools.wraps(func) + def wrapper(aten_op, args, kwargs=None): + if op == CompareOp.LT and not (len(args) < num): + raise NotImplementedError(msg) + return func(aten_op, args, kwargs) + return wrapper + return decorator + +def expect_arg_value_at_k(k: int, op: CompareOp, value: Any, msg: str): + def decorator(func): + @functools.wraps(func) + def wrapper(aten_op, args, kwargs=None): + if op == CompareOp.EQ and not (args[k] == value): + raise NotImplementedError(msg + str(args[k])) + return func(aten_op, args, kwargs) + return wrapper + return decorator + +def expect_args_len_at_k(k: int, op: CompareOp, value: Any, msg: str): + def decorator(func): + @functools.wraps(func) + def wrapper(aten_op, args, kwargs=None): + if op == CompareOp.LT and not (len(args[k]) < value): + raise NotImplementedError(msg + str(len(args[k]))) + elif op == CompareOp.EQ and not (len(args[k]) == value): + raise NotImplementedError(msg + str(len(args[k]))) + return func(aten_op, args, kwargs) + return wrapper + return decorator + + 
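+# Usage sketch for the guards above (illustrative, mirroring the handlers that
+# follow): an aten-op handler stacks them under @implements, e.g.
+#
+#   @implements([aten.slice.Tensor])
+#   @expect_num_of_args(CompareOp.LT, 5, "aten.slice(NF4Tensor) with customized step")
+#   def nf4_slice(aten_op, args, kwargs=None): ...
+#
+# so a call like nf4_tensor[::2], which reaches aten.slice with an explicit step
+# argument (five positional args), raises NotImplementedError before the handler
+# body ever runs.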
+@implements([torch.ops.aten.detach])
 def noop_detach(func, *args, **kwargs):
     return args[0][0]
+@implements(
+    [
+        aten.detach.default,
+    ]
+)
+def nf4_detach(aten_op, args, kwargs=None):
+    nf4tensor = args[0]
+    updated_attrs = apply_to_inner_tensors(nf4tensor, aten_op, args[1:], kwargs)
+    return NF4Tensor(*construct_nf4_args(nf4tensor, updated_attrs))
+
+
+@implements(
+    [
+        aten.split.Tensor,
+    ]
+)
+def nf4_split(aten_op, args, kwargs=None):
+    if len(args) == 3 and args[2] != 0:
+        raise NotImplementedError(f"aten.split(NF4Tensor, dim={args[2]})")
+    nf4tensor = args[0]
+    num_chunks = nf4tensor.size(0) // args[1]
+
+    attr_to_chunks = {}
+    for attr in _INNER_TENSOR_NAMES_FOR_SHARDING:
+        inner_tensor = getattr(nf4tensor, attr)
+        assert inner_tensor.numel() % num_chunks == 0, f"{attr}.numel() not divisible by {num_chunks}"
+        chunks = aten_op(inner_tensor, inner_tensor.numel() // num_chunks, **kwargs)
+        attr_to_chunks[attr] = chunks
+
+    orig_dim = nf4tensor.dim()
+    if orig_dim == 1:
+        chunked_size = (nf4tensor.size(0) // num_chunks, )
+    elif orig_dim == 2:
+        chunked_size = (nf4tensor.size(0) // num_chunks, nf4tensor.size(1))
+    else:
+        chunked_size = ()
+        raise NotImplementedError(f"aten.split(NF4Tensor) where NF4Tensor.dim() = {orig_dim}")
+
+    nf4_chunks = []
+    for idx in range(num_chunks):
+        updated_attrs = {
+            "size": chunked_size
+        }
+        for attr, chunks in attr_to_chunks.items():
+            updated_attrs[attr] = chunks[idx]
+        nf4_chunks.append(NF4Tensor(*construct_nf4_args(nf4tensor, updated_attrs)))
+    return nf4_chunks
+@implements(
+    [
+        aten.new_zeros.default,
+    ]
+)
+@expect_args_len_at_k(1, CompareOp.LT, 3, "aten.new_zeros(NF4Tensor) with len(size)=")
+def nf4_new_zeros(aten_op, args, kwargs=None):
+    nf4tensor = args[0]
+    new_size = tuple(args[1])
+    new_size_dim = len(new_size)
+    if nf4tensor.numel() % math.prod(new_size) != 0:
+        raise NotImplementedError(f"aten.new_zeros(NF4Tensor) with new size {new_size}")
+    ratio = nf4tensor.numel() // math.prod(new_size)
+
+    updated_attrs = {}
+    for attr in _INNER_TENSOR_NAMES_FOR_SHARDING:
+        inner_tensor = getattr(nf4tensor, attr)
+        assert inner_tensor.size(0) % ratio == 0, f"{attr}.numel() must be divisible by {ratio}"
+        inner_tensor = aten_op(inner_tensor, [inner_tensor.size(0) // ratio], **kwargs)
+        updated_attrs[attr] = inner_tensor
+    updated_attrs["size"] = new_size
+
+    return NF4Tensor(*construct_nf4_args(nf4tensor, updated_attrs))
+@implements(
+    [
+        aten.slice.Tensor,
+    ]
+)
+@expect_num_of_args(CompareOp.LT, 5, "aten.slice(NF4Tensor) with customized step")
+@expect_arg_value_at_k(1, CompareOp.EQ, 0, "aten.slice(NF4Tensor) with dim=")
+@expect_arg_value_at_k(2, CompareOp.EQ, 0, "aten.slice(NF4Tensor) with start=")
+def nf4_slice(aten_op, args, kwargs=None):
+    nf4tensor = args[0]
+    # for tensor 512 x 512, tensor[:, :512] dispatches to
+    # aten.slice(dim = 0, end=sys.maxsize)
+    if args[3] not in [nf4tensor.size(0), sys.maxsize]:
+        raise NotImplementedError(f"aten.slice(NF4Tensor) with end={args[3]}")
+    return NF4Tensor(*construct_nf4_args(nf4tensor))
+@implements(
+    [
+        aten.view.default,
+    ]
+)
+@expect_args_len_at_k(1, CompareOp.EQ, 1, "aten.view(NF4Tensor) with len(size)=")
+def nf4_view(aten_op, args, kwargs=None):
+    nf4tensor = args[0]
+    size = args[1]
+    if size[0] != -1:
+        raise NotImplementedError(f"aten.view(NF4Tensor) with size={size}")
+    updated_attrs = apply_to_inner_tensors(nf4tensor, aten_op, args[1:], kwargs)
+    updated_attrs.update({
+        "size": [nf4tensor.numel()],
+        "stride": (1, ),
+    })
+    return 
NF4Tensor(*construct_nf4_args(nf4tensor, updated_attrs)) + +@implements( + [ + aten.as_strided.default, + ] +) +@expect_args_len_at_k(1, CompareOp.LT, 3, "aten.as_strided(NF4Tensor) only support dim <= 2 but got dim=") +def nf4_as_strided(aten_op, args, kwargs=None): + nf4tensor = args[0] + size = args[1] + stride = tuple(args[2]) + storage_offset = args[3] + if math.prod(size) != nf4tensor.numel(): + raise NotImplementedError(f"aten.as_strided(NF4Tensor) different numel={nf4tensor.numel()} and size={size}") + if stride != make_contiguous_strides_for(size): + raise NotImplementedError(f"aten.as_strided(NF4Tensor) only support continuous stride={make_contiguous_strides_for(size)} but got stride={stride}") + if nf4tensor.storage_offset() != storage_offset: + raise NotImplementedError(f"aten.as_strided(NF4Tensor) only support original storage offset {nf4tensor.storage_offset()} but got {storage_offset}") + kwargs = { + "size": torch.Size(size), + "stride": stride, + "storage_offset": storage_offset, + } + return NF4Tensor(*construct_nf4_args(nf4tensor, kwargs)) + + @implements([torch.ops.aten._to_copy.default]) def _to_copy(func, *args, **kwargs): if not args[0][0].is_contiguous(): @@ -128,6 +345,31 @@ def copy_(func, *args, **kwargs): return original.copy_(same_meta_nf4) +@implements( + [ + aten.is_pinned.default, + ] +) +def nf4_is_pinned(aten_op, args, kwargs=None): + nf4tensor = args[0] + for attr in _INNER_TENSOR_NAMES_FOR_SHARDING: + inner_tensor = getattr(nf4tensor, attr) + if not aten_op(inner_tensor, *(args[1:]), **kwargs): + return False + return True + + +@implements( + [ + aten._pin_memory.default, + ] +) +def nf4_pin_memory(aten_op, args, kwargs=None): + nf4tensor = args[0] + updated_attrs = apply_to_inner_tensors(nf4tensor, aten_op, args[1:], kwargs) + return NF4Tensor(*construct_nf4_args(nf4tensor, updated_attrs)) + + @dataclass class SubclassTensorArgs: original_shape: torch.Size @@ -232,7 +474,7 @@ def from_tensor( block_size: int, scaler_block_size: int, ): - assert inpt_tensor.dim() <= 2 + assert inpt_tensor.dim() <= 2, f"expect input tensor dim <= 2 but got dim = {inpt_tensor.dim()}" assert ( inpt_tensor.numel() % block_size == 0 ), f"Input tensor must be divisible by block size, got {inpt_tensor.numel()} and {block_size}" @@ -553,6 +795,67 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): return func(*args, **kwargs) + def fsdp_pre_all_gather(self, mesh: DeviceMesh) -> Tuple[Tuple[torch.Tensor, ...], Any]: + return ( + self.quantized_scalers, + self.quantization_factor, + self.quantized_data, + ), ( + SubclassTensorArgs( + self.size(), + self.stride(), + self.storage_offset(), + self.dtype, + self.device, + self.requires_grad, + ), + self.block_size, + self.n_blocks, + self.scaler_block_size, + self.scaler_mean, + self.nf4, + mesh.get_group().size(), + ) + + def fsdp_post_all_gather( + self, + all_gather_outputs: Tuple[torch.Tensor, ...], + metadata: Any, + param_dtype: torch.dtype, + *, + out: Optional[torch.Tensor] = None, + ) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor, ...]], None]: + (quantized_scalers, quantization_factor, quantized_data) = all_gather_outputs + (tensor_meta, block_size, n_blocks, scaler_block_size, scaler_mean, nf4, pg_size) = metadata + if len(tensor_meta.original_shape) != 2: + raise NotImplementedError(f"only support 2D shape but got dim={len(tensor_meta.original_shape)}") + tensor_meta.original_shape = torch.Size((tensor_meta.original_shape[0] * pg_size, tensor_meta.original_shape[1])) + if out is not None: + # TODO: add 
param dtype for mixed precision + assert isinstance(out, NF4Tensor), f"{type(out)}" + assert ( + quantized_scalers.untyped_storage().data_ptr() + == out.quantized_scalers.untyped_storage().data_ptr() and + quantization_factor.untyped_storage().data_ptr() + == out.quantization_factor.untyped_storage().data_ptr() and + quantized_data.untyped_storage().data_ptr() + == out.quantized_data.untyped_storage().data_ptr() + ), f"Expects out's data to be the all-gather output" + return + + return NF4Tensor( + tensor_meta, + block_size, + n_blocks, + scaler_block_size, + quantized_scalers, + quantization_factor, + scaler_mean, + quantized_data, + nf4, + ), (quantized_scalers, quantization_factor, quantized_data) + + class LinearNF4(torch.autograd.Function): @staticmethod def forward(ctx, input: torch.Tensor, weight: NF4Tensor): @@ -595,12 +898,33 @@ def decorator(func): @implements_torch_function(torch.Tensor.to) def function_to_dtype(*args, **kwargs): - if isinstance(args[0], NF4Tensor) and isinstance(args[1], torch.dtype): + tensor = args[0] + if isinstance(args[1], torch.dtype): # Tensor.to(dtype, non_blocking, copy, memory_format) - return args[0].get_original_weight().to(*args[1:], **kwargs) + return tensor.get_original_weight().to(*args[1:], **kwargs) + elif ( + isinstance(args[1], torch.device) or ( + isinstance(args[1], str) and ( + args[1] == "cpu" or args[1].startswith("cuda") + ) + ) + ) and len(args) == 2: + # Tensor.to(device, non_blocking) + device = args[1] + updated_attrs = call_from_inner_tensors(tensor, "to", args[1:], kwargs) + updated_attrs["device"] = device + return NF4Tensor(*construct_nf4_args(tensor, updated_attrs)) else: # Tensor.to(device, dtype, non_blocking, copy, memory_format) # Tensor.to(other, non_blocking, copy) raise NotImplementedError( f"NF4Tensor.to({args[1:]}, {kwargs}) is not supported, passing to dispatch" ) + + +@implements_torch_function(torch.Tensor.cpu) +def function_cpu(*args, **kwargs): + nf4tensor = args[0] + updated_attrs = call_from_inner_tensors(nf4tensor, "cpu", args[1:], kwargs) + updated_attrs["device"] = "cpu" + return NF4Tensor(*construct_nf4_args(nf4tensor, updated_attrs)) From eb037531d433113922284acaebf932692749162a Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Thu, 2 May 2024 17:35:36 -0700 Subject: [PATCH 04/61] Enable FSDP Test in CI (#207) * Enable FSDP Test in CI * yolo * yolo * yolo --- dev-requirements.txt | 13 +++++++++---- test/hqq/test_triton_mm.py | 29 +++++++++++------------------ 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 8a8ed1e491..76a984d939 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,10 +1,15 @@ -pytest +# Test utilities +pytest==7.4.0 expecttest +unittest-xml-reporting parameterized packaging transformers + +# For prototype features and benchmarks bitsandbytes #needed for testing triton quant / dequant ops for 8-bit optimizers -matplotlib # needed for triton benchmarking -pandas # also for triton benchmarking -transformers #for galore testing +matplotlib +pandas + +# Custom CUDA Extensions ninja diff --git a/test/hqq/test_triton_mm.py b/test/hqq/test_triton_mm.py index 23f6c60f70..471ede4250 100644 --- a/test/hqq/test_triton_mm.py +++ b/test/hqq/test_triton_mm.py @@ -1,17 +1,11 @@ # Skip entire test if triton is not available, otherwise CI failure import pytest -try: - import triton - import hqq - if int(triton.__version__.split(".")[0]) < 3: - pytest.skip("triton >= 3.0.0 is required to run this test", allow_module_level=True) 
-except ImportError:
-    pytest.skip("triton and hqq required to run this test", allow_module_level=True)
-
-import itertools
-import torch
-
-from hqq.core.quantize import HQQLinear, BaseQuantizeConfig
+
+triton = pytest.importorskip("triton", minversion="3.0.0", reason="triton >= 3.0.0 required to run this test")
+hqq = pytest.importorskip("hqq", reason="hqq required to run this test")
+HQQLinear = pytest.importorskip("hqq.core.quantize", reason="hqq.core.quantize required to run this test").HQQLinear
+BaseQuantizeConfig = pytest.importorskip("hqq.core.quantize", reason="hqq.core.quantize required to run this test").BaseQuantizeConfig
+
+import torch

 from torchao.prototype.hqq import triton_mixed_mm, pack_2xint4

@@ -61,7 +55,7 @@ def test_mixed_mm(shape, group_size, axis, dtype, transposed, kernel_type, quant
         **dict(group_size=group_size, axis=axis),
     }
     M, N, K = shape
-    
+
     linear = torch.nn.Linear(K, N, bias=False, dtype=dtype, device="cuda")

     quant_config = BaseQuantizeConfig(
@@ -81,19 +75,19 @@ def test_mixed_mm(shape, group_size, axis, dtype, transposed, kernel_type, quant
     scales, zeros = meta["scale"], meta["zero"]
     scales = scales.reshape(N, -1)
     zeros = zeros.reshape(N, -1)
-    
+
     if transposed:
         x = torch.randn(M, N, dtype=dtype, device="cuda")
-        hqq_out = x @ W_dq 
+        hqq_out = x @ W_dq

-        #Pack uint8 W_q, then run fused dequant matmul 
+        #Pack uint8 W_q, then run fused dequant matmul
         packed_w = pack_2xint4(W_q)
         tt_out = triton_mixed_mm(
             x, packed_w, scales, zeros, transposed=True, group_size=group_size, fp8_fast_accum=False, kernel_type=kernel_type
         )
     else:
         x = torch.randn(M, K, dtype=dtype, device="cuda")
-        hqq_out = x @ W_dq.T 
+        hqq_out = x @ W_dq.T

         packed_w = pack_2xint4(W_q.T)
         tt_out = triton_mixed_mm(
@@ -101,4 +95,3 @@ def test_mixed_mm(shape, group_size, axis, dtype, transposed, kernel_type, quant
     )

     assert check(hqq_out, tt_out, max_diff=1e-2 if dtype == torch.bfloat16 else 1e-3)
-

From ec9d9d857b991a047fadf96c0906c776040d0805 Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Thu, 2 May 2024 17:38:08 -0700
Subject: [PATCH 05/61] quant primitives: always set min val for scale (#201)

Summary:
This is to avoid div by 0 in quantize

Test Plan:
python test/quantization/test_quant_primitives.py -k test_choose_qparams_tensor_asym_eps

Reviewers:

Subscribers:

Tasks:

Tags:
---
 test/quantization/test_quant_primitives.py | 9 +++++++++
 torchao/quantization/quant_primitives.py   | 5 +++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py
index 6186714e3b..8547532b78 100644
--- a/test/quantization/test_quant_primitives.py
+++ b/test/quantization/test_quant_primitives.py
@@ -231,6 +231,15 @@ def test_quantize_dequantize_channel_asym_4d_multi_dim_reduction(self):
         # we don't have corresponding ops in existing primitives, so just make sure it runs and it's close to float
         torch.testing.assert_allclose(dequantized, input, rtol=2, atol=0.02)

+    def test_choose_qparams_tensor_asym_eps(self):
+        input = torch.zeros(10, 10)
+        mapping_type = MappingType.ASYMMETRIC
+        dtype = torch.int8
+        block_size = (10, 10)
+        scale, zero_point = choose_qparams_affine(input, mapping_type, block_size, dtype)
+        eps = torch.finfo(torch.float32).eps
+        self.assertEqual(scale, eps)
+

 if __name__ == "__main__":
     unittest.main()
diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py
index bd4bcce1aa..42f32a9c78 100644
--- a/torchao/quantization/quant_primitives.py
+++ b/torchao/quantization/quant_primitives.py
@@ -287,8 +287,9 @@ 
def choose_qparams_affine( else: raise RuntimeError(f"Unsupported mapping type: {mapping_type}") - if eps is not None: - scale = torch.clamp(scale, min=eps) + if eps is None: + eps = torch.finfo(input.dtype).eps + scale = torch.clamp(scale, min=eps) return scale.to(dtype=scale_dtype), zero_point.to(dtype=zero_point_dtype) From 500e507737a9636fb90cfdd34d81e463eb23c76a Mon Sep 17 00:00:00 2001 From: leozhang Date: Fri, 3 May 2024 09:32:50 +0800 Subject: [PATCH 06/61] [pruning]add dropout to list of supported activation functions (#194) * [pruning]add dropout to list of supported activation functions * [docs] add cudamode's link in README --------- Co-authored-by: Jesse Cai --- README.md | 2 +- torchao/sparsity/prototype/pruner/base_structured_sparsifier.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d1d005486c..8f1c805100 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # torchao: PyTorch Architecture Optimization -[![](https://dcbadge.vercel.app/api/server/cudamode?style=flat)](discord.gg/cudamode) +[![](https://dcbadge.vercel.app/api/server/cudamode?style=flat)](https://discord.gg/cudamode) This repository is currently under heavy development - if you have suggestions on the API or use-cases you'd like to be covered, please open an [issue](https://github.com/pytorch/ao/issues) diff --git a/torchao/sparsity/prototype/pruner/base_structured_sparsifier.py b/torchao/sparsity/prototype/pruner/base_structured_sparsifier.py index a1a34b77f8..b8e4112a79 100644 --- a/torchao/sparsity/prototype/pruner/base_structured_sparsifier.py +++ b/torchao/sparsity/prototype/pruner/base_structured_sparsifier.py @@ -57,6 +57,7 @@ def _get_supported_activation_functions(): F.softsign, F.tanhshrink, F.gelu, + F.dropout, } return SUPPORTED_ACTIVATION_FUNCTIONS @@ -84,6 +85,7 @@ def _get_supported_activation_modules(): nn.Softsign, nn.Tanhshrink, nn.GELU, + nn.Dropout, } return SUPPORTED_ACTIVATION_MODULES From 1d9fea924a80deb82705d7806eb5efa6b6de7d1d Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Thu, 2 May 2024 19:03:55 -0700 Subject: [PATCH 07/61] Reduce memory usage for symmetric choose_qparams (#210) Summary: Also unified the impl a bit more Test Plan: python test/quantization/test_quant_primitives.py Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_primitives.py | 10 ++++++++++ torchao/quantization/quant_primitives.py | 23 +++++++++++----------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py index 8547532b78..139116def2 100644 --- a/test/quantization/test_quant_primitives.py +++ b/test/quantization/test_quant_primitives.py @@ -240,6 +240,16 @@ def test_choose_qparams_tensor_asym_eps(self): eps = torch.finfo(torch.float32).eps self.assertEqual(scale, eps) + @unittest.skipIf(not torch.cuda.is_available(), "skipping when cuda is not available") + def test_get_group_qparams_symmetric_memory(self): + """Check the memory usage of the op""" + weight = torch.randn(1024, 1024).to(device="cuda") + original_mem_use = torch.cuda.memory_allocated() + n_bit = 4 + groupsize = 128 + (scale_ao, _) = get_group_qparams_symmetric(weight, n_bit, groupsize) + after_choose_qparams_mem_use = torch.cuda.memory_allocated() + self.assertTrue(after_choose_qparams_mem_use < 1.2 * original_mem_use) if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index 
42f32a9c78..4d6a7666aa 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -260,6 +260,8 @@ def choose_qparams_affine( Tuple of scales and zero_points Tensor with requested dtype """ quant_min, quant_max = _get_and_check_qmin_qmax(target_dtype, quant_min, quant_max) + assert mapping_type in [MappingType.SYMMETRIC, MappingType.ASYMMETRIC], f"Unsupported mapping type: {mapping_type}" + if scale_dtype is None: scale_dtype = input.dtype if zero_point_dtype is None: @@ -269,23 +271,20 @@ def choose_qparams_affine( shape_for_reduction, reduction_dims = _get_reduction_params(block_size, input.size()) input = input.view(shape_for_reduction) - if mapping_type == MappingType.SYMMETRIC: - amax = torch.amax(torch.abs(input), dim=reduction_dims, keepdim=False) - scale = amax / (float(quant_max - quant_min) / 2) - zero_point = torch.ones_like(scale) - zero_point *= int((quant_min + quant_max + 1) / 2) - elif mapping_type == MappingType.ASYMMETRIC: - min_val = torch.amin(input, dim=reduction_dims, keepdim=False) - max_val = torch.amax(input, dim=reduction_dims, keepdim=False) + min_val = torch.amin(input, dim=reduction_dims, keepdim=False) + max_val = torch.amax(input, dim=reduction_dims, keepdim=False) - min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) - max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) + max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + if mapping_type == MappingType.SYMMETRIC: + max_val_pos = torch.max(-min_val_neg, max_val_pos) + scale = max_val_pos / (float(quant_max - quant_min) / 2) + zero_point = torch.full_like(scale, int((quant_min + quant_max + 1) / 2)) + else: scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min) zero_point = quant_min - torch.round(min_val_neg / scale) zero_point = torch.clamp(zero_point, quant_min, quant_max) - else: - raise RuntimeError(f"Unsupported mapping type: {mapping_type}") if eps is None: eps = torch.finfo(input.dtype).eps From 2371ff84575a8a2fbf23ea6ce8cf1074ebba6af1 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Thu, 2 May 2024 19:32:07 -0700 Subject: [PATCH 08/61] Refactor tensor subclass API to also use paramterization (#146) Summary: Also added tests for tensor subclass api + AOTI compilation Test Plan: python test/integration/test_integration.py -k test_aoti Reviewers: Subscribers: Tasks: Tags: --- .github/workflows/regression_test.yml | 9 +- test/integration/test_integration.py | 92 ++++++++++++++++--- torchao/quantization/quant_api.py | 32 ++++--- torchao/quantization/quant_primitives.py | 24 +++-- torchao/quantization/subclass.py | 108 +++++++++++++++++------ 5 files changed, 207 insertions(+), 58 deletions(-) diff --git a/.github/workflows/regression_test.yml b/.github/workflows/regression_test.yml index 85a79cd5c3..cc72c57512 100644 --- a/.github/workflows/regression_test.yml +++ b/.github/workflows/regression_test.yml @@ -31,9 +31,9 @@ jobs: torch-spec: 'torch==2.3.0' gpu-arch-type: "cuda" gpu-arch-version: "12.1" - - name: CUDA 2.4.0.dev20240421 + - name: CUDA 2.4.0.dev20240428 runs-on: linux.g5.12xlarge.nvidia.gpu - torch-spec: '--pre torch==2.4.0.dev20240421+cu121 --index-url https://download.pytorch.org/whl/nightly/cu121' + torch-spec: '--pre torch==2.4.0.dev20240428+cu121 --index-url https://download.pytorch.org/whl/nightly/cu121' gpu-arch-type: "cuda" gpu-arch-version: "12.1" - name: CPU 2.2.2 @@ -58,6 +58,11 @@ jobs: gpu-arch-type: ${{ matrix.gpu-arch-type }} 
gpu-arch-version: ${{ matrix.gpu-arch-version }} script: | + conda create -n venv python=3.9 -y + conda activate venv + echo "::group::Install newer objcopy that supports --set-section-alignment" + yum install -y devtoolset-10-binutils + export PATH=/opt/rh/devtoolset-10/root/usr/bin/:$PATH python -m pip install --upgrade pip pip install ${{ matrix.torch-spec }} pip install -r requirements.txt diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index 83211cecdd..87efb3962a 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -67,20 +67,28 @@ from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx import os from parameterized import parameterized +import itertools +import logging from torchao.quantization.utils import TORCH_VERSION_AFTER_2_3, TORCH_VERSION_AFTER_2_4 +logger = logging.getLogger("INFO") + torch.manual_seed(0) config.cache_size_limit = 100 -COMMON_DEVICE_DTYPE=[ - ("cpu", torch.float32), - ("cpu", torch.float16), - ("cpu", torch.bfloat16), - ("cuda", torch.float32), - ("cuda", torch.float16), - ("cuda", torch.bfloat16), +# TODO: use this to reduce the number of tests +TENSOR_SUBCLASS_APIS = [ + change_linear_weights_to_int8_dqtensors, + change_linear_weights_to_int8_woqtensors, + change_linear_weights_to_int4_woqtensors, ] +COMMON_DEVICES = ["cpu", "cuda"] + +COMMON_DTYPES = [torch.float32, torch.float16, torch.bfloat16] + +COMMON_DEVICE_DTYPE = list(itertools.product(COMMON_DEVICES, COMMON_DTYPES)).copy() + def combine_parameters(a, b): new_tuples = [] for (tuple1, tuple2) in itertools.product(a, b): @@ -88,10 +96,17 @@ def combine_parameters(a, b): return new_tuples def run_supported_device_dtype(test_method): + """Assumes that the 3rd arg (args[2]) of the decorated method is device and + there is a `test_dtype` kwarg or the 4th arg (args[3]) that indicates the dtype for testing + """ def wrapper(*args, **kwargs): - if args[2] == "cuda" and not torch.cuda.is_available(): + if len(args) < 3: + raise unittest.SkipTest(f"Not enough args. 
Expected at least 3, but got {len(args)}")
+        device = args[2]
+        dtype = kwargs["test_dtype"] if "test_dtype" in kwargs else args[3]
+        if device == "cuda" and not torch.cuda.is_available():
             raise unittest.SkipTest(f"Need CUDA available.")
-        if args[2] == "cuda" and torch.cuda.is_available() and kwargs['test_dtype'] == torch.bfloat16 and torch.cuda.get_device_capability() < (8, 0):
+        if device == "cuda" and torch.cuda.is_available() and dtype == torch.bfloat16 and torch.cuda.get_device_capability() < (8, 0):
             raise unittest.SkipTest("Need CUDA and SM80+ available.")
         return test_method(*args, **kwargs)
     return wrapper
@@ -1148,6 +1163,7 @@ def _test_handle_save_load_meta_impl(
     min_sqnr=35,
     test_dtype=torch.bfloat16
 ):
+    logger.info(f"TestSaveLoad: {api}, {test_device}, {test_dtype}")
     m, k, n = 32, 64, 32

     class test_model(nn.Module):
@@ -1180,7 +1196,7 @@ def forward(self, x):

         # load model structure
         with torch.device('meta'):
-            model = test_model()
+            model = test_model().to(dtype=test_dtype)
         api(model)

         # load quantized state_dict
@@ -1407,5 +1423,61 @@ def test_autoquant_multi_input(self, device, dtype, m1, m2, k, n):
         sqnr = SQNR(out, out2)
         self.assertTrue(sqnr >= 30)

+
+class TestAOTI(unittest.TestCase):
+    @parameterized.expand(
+        list(itertools.product(TENSOR_SUBCLASS_APIS, COMMON_DEVICES, COMMON_DTYPES)),
+    )
+    def test_aoti(self, api, test_device, test_dtype):
+        if not TORCH_VERSION_AFTER_2_4:
+            self.skipTest("aoti compatibility requires 2.4+.")
+
+        if test_device == "cuda":
+            self.skipTest("AOTI has some issues in cuda test right now, skipping")
+
+        logger.info(f"TestAOTI: {api}, {test_device}, {test_dtype}")
+        if api is change_linear_weights_to_int8_dqtensors and test_device == "cuda":
+            self.skipTest(f"{api} in {test_device} is not supported for aoti compilation yet")
+
+        if test_dtype != torch.bfloat16:
+            self.skipTest(f"{api} in {test_dtype} is not supported for aoti compilation yet")
+
+        if test_device == "cuda" and not torch.cuda.is_available():
+            self.skipTest(f"Need CUDA available.")
+        if test_device == "cuda" and torch.cuda.is_available() and test_dtype == torch.bfloat16 and torch.cuda.get_device_capability() < (8, 0):
+            self.skipTest("Need CUDA and SM80+ available.")
+
+        m, k, n = 32, 64, 32
+
+        class test_model(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.lin1 = nn.Linear(k, n)
+                self.relu = nn.ReLU()
+                self.lin2 = nn.Linear(n, n)
+
+            def forward(self, x):
+                x = self.lin1(x)
+                x = self.relu(x)
+                x = self.lin2(x)
+                return x
+
+        x = torch.randn(m, k, dtype=test_dtype, device=test_device)
+
+        # get float reference
+        model = test_model().to(dtype=test_dtype, device=test_device).eval()
+        ref_f = model(x)
+
+        kwargs = {"dtype": test_dtype}
+        api(model, **kwargs)
+
+        # running model
+        model(x)
+
+        # make sure it compiles
+        example_inputs = (x,)
+        torch._export.aot_compile(model, example_inputs)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/torchao/quantization/quant_api.py b/torchao/quantization/quant_api.py
index a830d52d78..2dcd935912 100644
--- a/torchao/quantization/quant_api.py
+++ b/torchao/quantization/quant_api.py
@@ -117,19 +117,27 @@ def apply_dynamic_quant(model, filter_fn=None):
     change_linear_weights_to_int8_dqtensors(model, filter_fn)


-def 
_get_subclass_inserter(cls, **kwargs):
-    method = kwargs.pop("method", "from_float")
+import torch.nn.utils.parametrize as parametrize
+
+def _get_subclass_inserter(cls, enable_parametrization=False, **kwargs):
+    constructor = kwargs.pop("constructor", "subclass_constructor")
+    from_float = kwargs.pop("method", "from_float")
     def insert_subclass(lin):
-        lin.weight = torch.nn.Parameter(
-            # cls.from_float(...)
-            getattr(cls, method)(lin.weight, **kwargs), requires_grad=False
-        )
+        if enable_parametrization:
+            lin.weight = torch.nn.Parameter(cls.from_float(lin.weight, **kwargs), requires_grad=False)
+            _, args = lin.weight.__tensor_flatten__()
+            parametrize.register_parametrization(lin, "weight", getattr(cls, constructor)(*args))
+        else:
+            lin.weight = torch.nn.Parameter(
+                # cls.from_float(...)
+                getattr(cls, from_float)(lin.weight, **kwargs), requires_grad=False
+            )
         return lin

     return insert_subclass


-def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):
+def change_linear_weights_to_int8_dqtensors(model, filter_fn=None, **kwargs):
     """
     Converts all linear weight tensors to the
     `Int8DynamicallyQuantizedLinearWeight` Tensor subclass,
     effectively applying the same form of quantization
@@ -141,11 +149,11 @@ def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):
     )

     _replace_with_custom_fn_if_matches_filter(
-        model, _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight), filter_fn
+        model, _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight, enable_parametrization=TORCH_VERSION_AFTER_2_4, **kwargs), filter_fn
     )


-def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):
+def change_linear_weights_to_int8_woqtensors(model, filter_fn=None, **kwargs):
     """
     Converts all linear weight tensors to the
     `Int8WeightOnlyQuantizedLinearWeight` tensor subclass,
@@ -154,7 +162,7 @@ def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):
     """
     _replace_with_custom_fn_if_matches_filter(
         model,
-        _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight),
+        _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight, enable_parametrization=TORCH_VERSION_AFTER_2_4, **kwargs),
         _is_linear if filter_fn is None else filter_fn,
     )

@@ -170,7 +178,7 @@ def change_linear_weights_to_int4_woqtensors(model, **kwargs):

     _replace_with_custom_fn_if_matches_filter(
         model,
-        _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, **kwargs),
+        _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, enable_parametrization=TORCH_VERSION_AFTER_2_4, **kwargs),
         filter_fn,
     )

diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py
index 4d6a7666aa..c3089224de 100644
--- a/torchao/quantization/quant_primitives.py
+++ b/torchao/quantization/quant_primitives.py
@@ -47,6 +47,13 @@
 ] + (_AFTER_TORCH_2_3_ONLY if TORCH_VERSION_AFTER_2_3 else [])

+def guard_dtype_size(tensor_arg, arg_name, dtype=None, size=None):
+    if dtype is not None and tensor_arg.dtype != dtype:
+        raise ValueError(f"Expected Tensor argument {arg_name} to have dtype {dtype}, but got {tensor_arg.dtype} instead.")
+    if size is not None and tensor_arg.size() != size:
+        raise ValueError(f"Expected Tensor argument {arg_name} to have size {size}, but got {tensor_arg.size()} instead.")
+
+
 _DTYPE_TO_QVALUE_BOUNDS = {
     torch.uint8: (0, 255),
     torch.int8: (-128, 127),
@@ -493,7 +500,7 @@ def quant_int8_dynamic_per_token_linear(
         x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype
     )
     if bias is not None:
-        mm_out += bias
+        mm_out = mm_out + bias
     return mm_out


@@ -554,7 +561,7 @@ 
def quant_int8_per_token_matmul( return y -def get_groupwise_affine_qparams(w, n_bit=4, groupsize=128): +def get_groupwise_affine_qparams(w, n_bit=4, groupsize=128, dtype=torch.bfloat16): """This is tinygemm specific, we'll keep this for now""" if groupsize > w.shape[-1]: groupsize = w.shape[-1] @@ -570,15 +577,14 @@ def get_groupwise_affine_qparams(w, n_bit=4, groupsize=128): max_int = 2**n_bit - 1 scales = (max_val - min_val).clamp(min=1e-6) / max_int zeros = min_val + scales * (2 ** (n_bit - 1)) - return scales.to(torch.bfloat16).reshape(w.shape[0], -1), zeros.to( - torch.bfloat16 + return scales.to(dtype=dtype).reshape(w.shape[0], -1), zeros.to( + dtype=dtype ).reshape(w.shape[0], -1) def pack_tinygemm_scales_and_zeros(scales, zeros): - assert scales.shape == zeros.shape - assert scales.dtype == torch.bfloat16 - assert zeros.dtype == torch.bfloat16 + guard_dtype_size(scales, "scales", dtype=torch.bfloat16, size=zeros.size()) + guard_dtype_size(zeros, "zeros", dtype=torch.bfloat16) return ( torch.cat( [ @@ -661,8 +667,8 @@ def groupwise_affine_dequantize_tensor_from_qparams( return w_dq -def groupwise_affine_quantize_tensor(w, n_bit=4, groupsize=128): - scales, zeros = get_groupwise_affine_qparams(w, n_bit, groupsize) +def groupwise_affine_quantize_tensor(w, n_bit=4, groupsize=128, dtype=torch.bfloat16): + scales, zeros = get_groupwise_affine_qparams(w, n_bit, groupsize, dtype) w_int4x8 = groupwise_affine_quantize_tensor_from_qparams( w, scales, zeros, n_bit, groupsize ) diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index e9b532d6d6..7de4a6169f 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -190,6 +190,29 @@ def __torch_dispatch__(cls, func, types, args, kwargs): args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), ) +class ConstructTensorSubclass(torch.nn.Module): + def __init__(self, *args, **kwargs): + super().__init__() + self.args = args + self.kwargs = kwargs + + def forward(self, x): + pass + + def right_inverse(self, tensor_subclass_instance): + fields, _ = tensor_subclass_instance.__tensor_flatten__() + return [getattr(tensor_subclass_instance, field) for field in fields] + + +@torch._dynamo.allow_in_graph +def from_qtensor_components_int8dyn(*args, **kwargs): + return Int8DynamicallyQuantizedLinearWeight(*args, **kwargs) + + +class ConstructTensorSubclassInt8Dyn(ConstructTensorSubclass): + def forward(self, int_data, q_scales): + return from_qtensor_components_int8dyn(int_data, q_scales, *self.args, **self.kwargs) + class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase): """ @@ -197,13 +220,16 @@ class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase): linear op to a dynamically quantized linear op with symmetric per-token and per-channel quantization on the activation and weight respectively. 
""" + subclass_constructor = ConstructTensorSubclassInt8Dyn @staticmethod - def __new__(cls, int_data, q_scales, transposed, shape, **kwargs): - kwargs["dtype"] = kwargs.get("dtype", q_scales.dtype) + def __new__(cls, int_data, q_scales, transposed, shape, dtype=None, **kwargs): + if dtype is None: + dtype = qscales.dtype + kwargs["dtype"] = dtype return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined] - def __init__(self, int_data, q_scales, transposed, shape, **kwargs): + def __init__(self, int_data, q_scales, transposed, shape, dtype=None, **kwargs): self.q_scales = q_scales super().__init__(int_data, transposed) @@ -266,14 +292,15 @@ def _change_shape(self, shape): ) def __tensor_flatten__(self): - return ["int_data", "q_scales"], [self.transposed, self.dtype, self.shape] + # note: the order of args must match the order of args in __init__ + return ["int_data", "q_scales"], [self.transposed, self.shape, self.dtype] @classmethod def __tensor_unflatten__( cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None ): int_data, q_scales = tensor_data_dict["int_data"], tensor_data_dict["q_scales"] - transposed, dtype, shape = tensor_attributes + transposed, shape, dtype = tensor_attributes return cls( int_data, q_scales, @@ -284,7 +311,7 @@ def __tensor_unflatten__( ) @classmethod - def from_float(cls, input_float, qmin=-128, qmax=127): + def from_float(cls, input_float, qmin=-128, qmax=127, dtype=None): """ Method used to convert a linear weight tensor to an instance of the Int8DynamicallyQuantizedLinearWeight subclass. @@ -295,6 +322,9 @@ def from_float(cls, input_float, qmin=-128, qmax=127): Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight) ) """ + if dtype is None: + dtype = input_float.dtype + # because we call transpose in dequantization w_int_repr, w_scales, _ = dynamically_quantize_per_channel( input_float, qmin, qmax, torch.int8 @@ -308,16 +338,27 @@ def from_float(cls, input_float, qmin=-128, qmax=127): if not issubclass(cls, Int8DynamicallyQuantizedLinearWeight): int_data = int_data.contiguous() return cls( - int_data, w_scales, False, input_float.shape, dtype=input_float.dtype + int_data, w_scales, False, input_float.shape, dtype=dtype, ) +@torch._dynamo.allow_in_graph +def from_qtensor_components_int8wo(*args, **kwargs): + return Int8WeightOnlyQuantizedLinearWeight(*args, **kwargs) + + +class ConstructTensorSubclassInt8wo(ConstructTensorSubclass): + def forward(self, int_data, q_scales): + return from_qtensor_components_int8wo(int_data, q_scales, *self.args, **self.kwargs) + + class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight): """ A Tensor subclass that when applied to a weight used in a linear op/module, changes the linear op to a weight-only quantized linear op with symmetric per-channel quantization on the weight. 
""" + subclass_constructor = ConstructTensorSubclassInt8wo @staticmethod def _quantized_op(act_mat, w_qtensor, bias): @@ -335,12 +376,21 @@ def _quantized_op(act_mat, w_qtensor, bias): return y.to(orig_dtype) +@torch._dynamo.allow_in_graph +def from_qtensor_components_int4wo(*args, **kwargs): + return Int4WeightOnlyQuantizedLinearWeight(*args, **kwargs) + +class ConstructTensorSubclassInt4wo(ConstructTensorSubclass): + def forward(self, int_data, scales_and_zeros): + return from_qtensor_components_int4wo(int_data, scales_and_zeros, *self.args, **self.kwargs) + class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase): """ A Tensor subclass that when applied to a weight used in a linear op/module, changes that linear op to a weight-only int4 quantized linear op with groupwise affine quantization on the weight. """ + subclass_constructor = ConstructTensorSubclassInt4wo @staticmethod def __new__( @@ -351,9 +401,12 @@ def __new__( shape, groupsize=128, inner_k_tiles=8, + dtype=None, **kwargs, ): - kwargs["dtype"] = kwargs.get("dtype", scales_and_zeros.dtype) + if dtype is None: + dtype = scales_and_zeros.dtype + kwargs["dtype"] = dtype return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined] def __init__( @@ -364,6 +417,7 @@ def __init__( shape, groupsize, inner_k_tiles, + dtype, **kwargs, ): # the transposed flag tracks whether the tensor subclass has been transposed relative @@ -372,9 +426,7 @@ def __init__( # square matrices can cause issues otherwise self.scales_and_zeros = scales_and_zeros - self.groupsize = groupsize - self.inner_k_tiles = inner_k_tiles super().__init__(int_data, transposed) @@ -465,10 +517,10 @@ def _change_shape(self, shape): def __tensor_flatten__(self): return ["int_data", "scales_and_zeros"], ( self.transposed, + self.shape, self.groupsize, self.inner_k_tiles, self.dtype, - self.shape, ) @classmethod @@ -482,7 +534,7 @@ def __tensor_unflatten__( tensor_data_dict["int_data"], tensor_data_dict["scales_and_zeros"], ) - transposed, groupsize, inner_k_tiles, dtype, shape = attributes + transposed, shape, groupsize, inner_k_tiles, dtype = attributes return cls( int_data, scales_and_zeros, @@ -495,7 +547,7 @@ def __tensor_unflatten__( ) @classmethod - def from_float(cls, input_float, groupsize=128, inner_k_tiles=8): + def from_float(cls, input_float, groupsize=128, inner_k_tiles=8, dtype=None): """ Method used to convert a linear weight tensor to an instance of the Int4WeightOnlyQuantizedLinearWeight subclass. 
@@ -506,9 +558,24 @@ def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):
             Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)
         )
         """
+        if dtype is None:
+            dtype = input_float.dtype
+
+        int_data, scales_and_zeros, transposed, groupsize, inner_k_tiles = cls.to_qtensor_components(input_float, groupsize, inner_k_tiles)
+        return cls(
+            int_data,
+            scales_and_zeros,
+            transposed,
+            input_float.shape,
+            groupsize,
+            inner_k_tiles,
+            dtype=dtype,
+        )
+
+    @classmethod
+    def to_qtensor_components(cls, input_float, groupsize=128, inner_k_tiles=8):
         assert groupsize in [256, 128, 64, 32]
         assert inner_k_tiles in [8, 4, 2]
-        orig_shape = input_float.shape
         orig_out_features, orig_in_features = input_float.shape

         # padding
@@ -521,16 +588,7 @@ def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):

         # quantization and packing
         input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(
-            input_float, 4, groupsize
+            input_float, 4, groupsize, dtype=input_float.dtype
         )
         int_data = aten._convert_weight_to_int4pack(input_int4x8, inner_k_tiles)
-
-        return cls(
-            int_data,
-            scales_and_zeros,
-            False,
-            orig_shape,
-            groupsize,
-            inner_k_tiles,
-            dtype=input_float.dtype,
-        )
+        return int_data, scales_and_zeros, False, groupsize, inner_k_tiles

From 5364de679d78446116f1f94f4db6e90eb1d38160 Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Fri, 3 May 2024 08:15:22 -0700
Subject: [PATCH 09/61] `quantize_activation_per_token_absmax` use general quant primitives (#193)

Summary:
att

Test Plan:
OSS CI

Reviewers:

Subscribers:

Tasks:

Tags:
---
 test/quantization/test_quant_primitives.py | 28 +++++++++++++++++++++
 torchao/quantization/quant_primitives.py   | 24 ++++++++++--------
 tutorials/quantize_vit/quant.json.gz       | Bin 16168 -> 20283 bytes
 3 files changed, 41 insertions(+), 11 deletions(-)

diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py
index 139116def2..90fd8f8bf0 100644
--- a/test/quantization/test_quant_primitives.py
+++ b/test/quantization/test_quant_primitives.py
@@ -126,6 +126,34 @@ def test_choose_qparams_tensor_sym(self):
         self.assertTrue(torch.equal(scale, scale_ref))
         self.assertTrue(torch.equal(zero_point, zp_ref))

+    @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower")
+    def test_quantize_activation_per_token_abs_max(self):
+        from torchao.quantization.quant_primitives import quantize_activation_per_token_absmax
+        input = torch.randn(10, 10)
+        quantized_ref, scale_ref = quantize_activation_per_token_absmax(input)
+
+        mapping_type = MappingType.SYMMETRIC
+        block_size = list(input.shape)
+        for i in range(len(block_size) - 1):
+            block_size[i] = 1
+        dtype = torch.int8
+        eps = 1e-5
+        quant_min = -127
+        quant_max = 127
+        scale, zero_point = choose_qparams_affine(input, mapping_type, block_size, dtype, quant_min, quant_max, eps=eps, scale_dtype=torch.float)
+
+        quantized = quantize_affine(input, block_size, scale, zero_point, dtype, quant_min, quant_max)
+
+        self.assertTrue(torch.equal(quantized, quantized_ref))
+        self.assertTrue(torch.equal(scale, scale_ref))
+
+    @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower")
+    def test_quantize_activation_per_token_abs_max_zero_input(self):
+        from torchao.quantization.quant_primitives import quantize_activation_per_token_absmax
+        input = torch.zeros(10, 10)
+        # make sure it still works
+        quantized_ref, scale_ref = quantize_activation_per_token_absmax(input)
+
     @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, 
"skipping when torch verion is 2.3 or lower") def test_quantize_dequantize_group_sym(self): diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index c3089224de..90316e1557 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -330,21 +330,23 @@ def dynamically_quantize_per_tensor( def quantize_activation_per_token_absmax(t): - n_bits = 8 # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1] - - scales = t.abs().amax(dim=-1, keepdim=True) - if scales.dtype == torch.float16: - scales = ( - scales.float() - ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range) - q_max = 2 ** (n_bits - 1) - 1 - scales = scales.clamp(min=1e-5).div(q_max) + mapping_type = MappingType.SYMMETRIC + block_size = list(t.shape) + for i in range(len(block_size) - 1): + block_size[i] = 1 + dtype = torch.int8 + eps = 1e-5 # Note: the original smoothquant does not clamp to qmin/qmax here, # but some of the tests with bfloat16 ended up with a flipped sign # if we don't clamp. TODO(future) look into this further. - t = torch.round(t / scales).clamp(-127, 127).to(torch.int8) - return t, scales + quant_min = -127 + quant_max = 127 + scale, zero_point = choose_qparams_affine(t, mapping_type, block_size, dtype, quant_min, quant_max, eps, scale_dtype=torch.float) + + quantized = quantize_affine(t, block_size, scale, zero_point, dtype, quant_min, quant_max) + + return quantized, scale def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype): diff --git a/tutorials/quantize_vit/quant.json.gz b/tutorials/quantize_vit/quant.json.gz index a207cefc5fae899263e3d00f4d85fcb3832b37ca..8caa43d81e16baf597c0b2be1d8991c496e2aaac 100644 GIT binary patch literal 20283 zcmb@tbyU<_`2Tw>6af)LS`ZjQx*3oVkZz<)>29P$i2>;b0qO2;kPZi*)H+fl8 zbXPH)|2@NL`v=b^xb9+fkF@RGhmHY!(e1>AiC&Laj~^3%N`~`c=ZPswMh6|vzE!RW zER21fp9dE&80Ek>r~i~*Z(gKRWnMDX@Mr1opYGn?#Ffdl-j!49s7^bclqPOIxb>3i zAT`%WMekeO!-8HtjxY4>Tz^QKJ?##>62st6M-G2+oSZE8Tex$h{=HcBy4va9^SZq1 zee3z-`(L@%$;4kxqwXBdr*OSLw(k3f%dCyxUhI9w8m2ww{5mpud9dlqaprRAwHkC9 zSu@QfW85D%v04?kEdBF;yV$qr@C0>GuXjLak4u=VcTdSd8*g8JTFcC8Gcg9&lkGs8!sDdU>7c*Ui z|D5!DaXP&}m_FX7*ZJ8CbMR{9KHI;1yC2mLz5G4xQRC$Jo-g5TjMMR!yY0!@#q_U@ z%jE`J=ZCVM34?#P&NoY6R(`A8l~>1}bEp>>TKRJtH#`6Ll95+pB}jt zQXH`E(lA#Pav(Ij@=Y5Xb7}XhQQ_GQb~ou@TCI?y9+{+|_`Zjk;`#@Es1~d5idOg7JaT-x>~J7z5Bl*? z_9HtU(@P9)I*7zpph-gf7Ou4Y=*FWJG}+e!-%3lPH1iz0qR;{y>jh>v{`~CgfBi9t zXJ$_2S$oG%j(~;iK{l-M!QxCE7(VPp#g{;K(@_TF7KRxm1D$vO2%}r$(+TTjzwDCP zUCAN8@O~4)rSq7nJ9NKPX!E?#>0w@-+hkqObPML5G&i35D8uw~wVannOi(?xx2jBH_WLS~z67RQ)n;um=Gn$zc%)Wbdoj*28aigDcWWI@1j>k(1ueBj-F(^Wnh_~Vv zhr9BTO|HegVXQ*kR#-X6wRc#8Q$c=XQ>;VOi~CR|%B>=4C;Q~YzxZrz9qyah@M zaog-~&2tf{dDeaT;-T~1*mkf>aDdAxG|aMH;@p4RjqgRljNPgDt4?tQ{-z6BIz|>w+D_O=_KELX{(0m4&|YtRX8+56 z^zUwa>yyKp{tsQhH=*fgfuy8Ezs|Y~PA8Z5TdzuN^rM0f7_3%D4x{!rj~sA!SB&gj z&-Mb}>AXxQ3E%8}8XhWW*< zxs=AgC|B5CHHK`{%x%@g&Rft)JGx-%d6qVX&nKmIs#dnG> z^4g9|S>g)#^h><{@~U>U=-6`fvUmPZrHR$fo*iG~Uy~-wOW5k-Kn+#j(=WacEO_cr zRoMkzHZ&6s4p_85B%3hW-|e=wpQ5PH$GJJO!g_0b{aZ>39agY|lV)u%YknvM>U zF#cd!3Cw$In9P|Yci*6|Q@GaFEtK%KUk;c4>0WVKDi}T1jsIg$0Kk(r! 
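Relatedly, the `ConstructTensorSubclass` machinery added in the subclass patch above builds on the `right_inverse` / `forward` pairing of `torch.nn.utils.parametrize`: `right_inverse` decomposes the weight into plain tensors for the state_dict, and `forward` reassembles the weight on access. A toy sketch of that round-trip (the mechanism only, not the torchao subclass itself):

```python
import torch
import torch.nn.utils.parametrize as parametrize

class ScaledWeight(torch.nn.Module):
    # forward() rebuilds the weight from its stored components
    def forward(self, data, scale):
        return data * scale

    # right_inverse() decomposes the weight into those components;
    # they are registered as original0 / original1 in the state_dict
    def right_inverse(self, weight):
        scale = weight.abs().amax().clamp(min=1e-12)
        return weight / scale, scale

lin = torch.nn.Linear(4, 4, bias=False)
parametrize.register_parametrization(lin, "weight", ScaledWeight())

print(list(lin.state_dict()))  # parametrizations.weight.original0 / original1
print(lin.weight.shape)        # reconstructed via forward() on access
```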
From be30a7fd5c57913eb8c4fd2266cf9147c5b3d8c9 Mon Sep 17 00:00:00 2001
From: andrewor14
Date: Fri, 3 May 2024 16:59:12 -0400
Subject: [PATCH 10/61] Fix FQ mask in 8da4w QAT (#199)

Co-authored-by: Jerry Zhang
---
 torchao/quantization/prototype/qat.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchao/quantization/prototype/qat.py b/torchao/quantization/prototype/qat.py
index 7ba64f3aca..4271d68e9b 100644
--- a/torchao/quantization/prototype/qat.py
+++ b/torchao/quantization/prototype/qat.py
@@ -183,7 +183,7 @@ def forward(ctx, input, scales, zero_points, quant_min, quant_max):
         q = input.div(scales).add(zero_points).round()
         dq = q.clamp(quant_min, 
quant_max).sub(zero_points).mul(scales) # TODO: do we need this mask? - mask = torch.logical_and((q >= quant_min), (dq <= quant_max)) + mask = torch.logical_and((q >= quant_min), (q <= quant_max)) ctx.save_for_backward(mask) return dq From be943a25352839825892a78d11b803ef71e64b03 Mon Sep 17 00:00:00 2001 From: jeromeku Date: Fri, 3 May 2024 16:31:40 -0700 Subject: [PATCH 11/61] Re-write HQQ Fused Gemm Transpose (#185) * add updated kernel and tests * update transpose benchmark * add usage section to README --------- Co-authored-by: Mark Saroufim --- benchmarks/benchmark_hqq.py | 85 ++++++++----- test/hqq/test_triton_mm.py | 194 +++++++++++++++++++++++++++--- torchao/prototype/hqq/README.md | 47 ++++++++ torchao/prototype/hqq/kernels.py | 173 ++++++++++++++++---------- torchao/prototype/hqq/mixed_mm.py | 98 +++++++++++++-- 5 files changed, 473 insertions(+), 124 deletions(-) diff --git a/benchmarks/benchmark_hqq.py b/benchmarks/benchmark_hqq.py index 393481e95b..123e2e5f52 100644 --- a/benchmarks/benchmark_hqq.py +++ b/benchmarks/benchmark_hqq.py @@ -1,22 +1,21 @@ - try: - import triton import hqq + import triton + if int(triton.__version__.split(".")[0]) < 3: raise "triton >= 3.0.0 is required to run this test" except ImportError: raise "triton and hqq required to run this benchmark" -import torch from io import StringIO import pandas as pd -from hqq.core.quantize import HQQLinear, BaseQuantizeConfig -from torchao.prototype.hqq.hqq_tinygemm_linear import HQQLinearTorchWeightOnlyInt4 -from torchao.prototype.hqq import triton_mixed_mm, pack_2xint4 - +import torch +from hqq.core.quantize import BaseQuantizeConfig, HQQLinear from triton.testing import do_bench +from torchao.prototype.hqq import pack_2xint4, triton_mixed_mm +from torchao.prototype.hqq.hqq_tinygemm_linear import HQQLinearTorchWeightOnlyInt4 BASE_QUANT_CONFIG = { "optimize": True, @@ -27,7 +26,16 @@ } -def bench_custom_kernel(x, W_q, scales, zeros, group_size, kernel_type="max_autotune", fp8_fast_accum=False): +def bench_custom_kernel( + x, + W_q, + scales, + zeros, + group_size, + transposed=False, + kernel_type="max_autotune", + fp8_fast_accum=False, +): packed_w = pack_2xint4(W_q.T) def fn(): @@ -36,6 +44,7 @@ def fn(): packed_w, scales.T, zeros.T, + transposed=transposed, group_size=group_size, fp8_fast_accum=fp8_fast_accum, kernel_type=kernel_type, @@ -45,22 +54,30 @@ def fn(): return t -def bench_hqq(x, hqq_linear: HQQLinear): - def fn(): - _ = hqq_linear.forward(x) +def bench_hqq(x, hqq_linear: HQQLinear | HQQLinearTorchWeightOnlyInt4, transposed=False, tinygemm=False): + def reference_fn(): + W_dq = hqq_linear.dequantize() + _ = x @ W_dq.T if not transposed else x @ W_dq + fn = reference_fn if not tinygemm else lambda: hqq_linear(x) t = do_bench(fn) return t -def run_benchmark(shape, group_size, dtype, axis=1, quant_dtype=torch.uint8): +def run_benchmark( + shape, group_size, dtype, axis=1, transposed=False, quant_dtype=torch.uint8 +): qcfg = { **BASE_QUANT_CONFIG, **dict(group_size=group_size, axis=axis), } M, N, K = shape - x = torch.randn(M, K, dtype=dtype, device="cuda") + x = ( + torch.randn(M, K, dtype=dtype, device="cuda") + if not transposed + else torch.randn(M, N, dtype=dtype, device="cuda") + ) linear = torch.nn.Linear(K, N, bias=False, dtype=dtype, device="cuda") quant_config = BaseQuantizeConfig( @@ -71,7 +88,7 @@ def run_benchmark(shape, group_size, dtype, axis=1, quant_dtype=torch.uint8): hqq_linear = HQQLinear(linear, quant_config, compute_dtype=dtype, del_orig=False) # Reference - ref_time = bench_hqq(x, 
hqq_linear) + ref_time = bench_hqq(x, hqq_linear, transposed=transposed) # Custom kernel W_q, meta = hqq_linear.W_q, hqq_linear.meta @@ -85,26 +102,31 @@ def run_benchmark(shape, group_size, dtype, axis=1, quant_dtype=torch.uint8): W_q = W_q.to(dtype=quant_dtype) scales = scales.reshape(N, -1) zeros = zeros.reshape(N, -1) - tt_time = bench_custom_kernel(x, W_q, scales, zeros, group_size) + tt_time = bench_custom_kernel( + x, W_q, scales, zeros, group_size, transposed=transposed + ) - if dtype == torch.bfloat16: + should_run_tinygemm = dtype == torch.bfloat16 and not transposed + if should_run_tinygemm: _ = quant_config["weight_quant_params"].pop("bitpack") hqq_int4mm = HQQLinearTorchWeightOnlyInt4( linear, quant_config, compute_dtype=dtype, del_orig=False ) - int4_time = bench_hqq(x, hqq_int4mm) + int4_time = bench_hqq(x, hqq_int4mm, transposed=transposed, tinygemm=True) - print(f"{shape=} {group_size=} {dtype=}:") + print(f"{shape=}, {group_size=}, {dtype=}, {transposed=}:") print( - f"Ref: {ref_time:.4f}", - f"Triton: {tt_time:.4f}", - f"Torch int4mm: {int4_time:.4f}" - if dtype == torch.bfloat16 - else "", + f"Ref: {ref_time:.4f}ms", + f"Triton: {tt_time:.4f}ms", + f"Torch int4mm: {int4_time:.4f}ms" if should_run_tinygemm else "", ) print() - return ref_time, tt_time, int4_time if dtype == torch.bfloat16 else None + return ( + ref_time, + tt_time, + int4_time if should_run_tinygemm else -1, + ) SHAPES = [ @@ -116,9 +138,9 @@ def run_benchmark(shape, group_size, dtype, axis=1, quant_dtype=torch.uint8): [1024, 4096, 4096], ] -DTYPES = [torch.bfloat16] # , torch.float16] +DTYPES = [torch.bfloat16] #[torch.float16, torch.bfloat16] GROUP_SIZES = [128] - +TRANSPOSED = [True] #[False, True] HEADERS = [ "M", @@ -126,6 +148,7 @@ def run_benchmark(shape, group_size, dtype, axis=1, quant_dtype=torch.uint8): "K", "group_size", "dtype", + "transposed", "ref", "triton", "tinygemm", @@ -138,10 +161,14 @@ def run_benchmark(shape, group_size, dtype, axis=1, quant_dtype=torch.uint8): for shape in SHAPES: for group_size in GROUP_SIZES: for dtype in DTYPES: - timings = run_benchmark(shape, group_size, dtype) - data.append((*shape, group_size, dtype, *timings)) + for transposed in TRANSPOSED: + timings = run_benchmark( + shape, group_size, dtype, transposed=transposed + ) + data.append((*shape, group_size, dtype, transposed, *timings)) output = StringIO() df = pd.DataFrame(data, columns=HEADERS) df.to_csv(output, index=False) - print(output.getvalue()) \ No newline at end of file + print(output.getvalue()) + # df.to_csv("benchmark_hqq_tinygemm.csv", index=False) \ No newline at end of file diff --git a/test/hqq/test_triton_mm.py b/test/hqq/test_triton_mm.py index 471ede4250..628723ea1c 100644 --- a/test/hqq/test_triton_mm.py +++ b/test/hqq/test_triton_mm.py @@ -1,15 +1,24 @@ -# Skip entire test if triton is not available, otherwise CI failure +# Skip entire test if following module not available, otherwise CI failure import pytest -triton = pytest.importorskip("triton", minversion="3.0.0", reason="Triton > 3.0.0 required to run this test") +triton = pytest.importorskip( + "triton", minversion="3.0.0", reason="Triton > 3.0.0 required to run this test" +) hqq = pytest.importorskip("hqq", reason="hqq required to run this test") -HQQLinear = pytest.importorskip("hqq.core.quantize.HQQLinear", reason="HQQLinear required to run this test") -BaseQuantizeConfig = pytest.importorskip("hqq.core.quantize.BaseQuantizeConfig", reason="HQQLinear required to run this test") +HQQLinear = pytest.importorskip( + 
"hqq.core.quantize.HQQLinear", reason="HQQLinear required to run this test" +) +BaseQuantizeConfig = pytest.importorskip( + "hqq.core.quantize.BaseQuantizeConfig", reason="HQQLinear required to run this test" +) -from torchao.prototype.hqq import triton_mixed_mm, pack_2xint4 +import itertools +import torch -#Test configs +from torchao.prototype.hqq import pack_2xint4, triton_mixed_mm + +# Test configs SHAPES = [ [16, 128, 128], [16, 4096, 4096], @@ -17,11 +26,13 @@ DTYPES = [torch.bfloat16, torch.float16] GROUP_SIZES = [64, 128] -AXES = [1] #Only axis = 1 supported -TRANSPOSED = [True] -TRITON_KERNEL_TYPE = ["compute_bound"] #["max_autotune", "compute_bound"] +AXES = [1] # Only axis = 1 supported +TRANSPOSED = [False, True] +TRITON_KERNEL_TYPE = ["compute_bound"] # ["max_autotune", "compute_bound"] -TEST_CONFIGS = list(itertools.product(SHAPES, GROUP_SIZES, AXES, DTYPES, TRANSPOSED, TRITON_KERNEL_TYPE)) +TEST_CONFIGS = list( + itertools.product(SHAPES, GROUP_SIZES, AXES, DTYPES, TRANSPOSED, TRITON_KERNEL_TYPE) +) BASE_QUANT_CONFIG = { "optimize": True, @@ -37,19 +48,40 @@ def check(expected, actual, msg="", max_diff=1e-3, verbose=False): if verbose: max_err = (expected - actual).abs().max() if not passed: - print(f"{msg}: Failed! Max error: {max_err}") + print_msg = f"{msg}:\nFailed! Max error: {max_err}" + try: + from termcolor import colored + except ImportError: + print(print_msg) + else: + print(colored(print_msg, "red", attrs=["bold"])) + else: - print(f"{msg}: Passed! Max error: {max_err}") + print_msg = f"{msg}:\nPassed! Max error: {max_err}" + try: + from termcolor import colored + except ImportError: + print(print_msg) + else: + print(colored(print_msg, "green", attrs=["bold"])) return passed + def _arg_to_id(arg): if isinstance(arg, list): return "x".join([str(x) for x in arg]) return str(arg) -@pytest.mark.parametrize("shape, group_size, axis, dtype, transposed, kernel_type", TEST_CONFIGS, ids=_arg_to_id) -def test_mixed_mm(shape, group_size, axis, dtype, transposed, kernel_type, quant_dtype=torch.uint8): + +@pytest.mark.parametrize( + "shape, group_size, axis, dtype, transposed, kernel_type", + TEST_CONFIGS, + ids=_arg_to_id, +) +def test_mixed_mm( + shape, group_size, axis, dtype, transposed, kernel_type, quant_dtype=torch.uint8 +): qcfg = { **BASE_QUANT_CONFIG, **dict(group_size=group_size, axis=axis), @@ -76,22 +108,144 @@ def test_mixed_mm(shape, group_size, axis, dtype, transposed, kernel_type, quant scales = scales.reshape(N, -1) zeros = zeros.reshape(N, -1) + packed_w = pack_2xint4(W_q.T) + if transposed: x = torch.randn(M, N, dtype=dtype, device="cuda") hqq_out = x @ W_dq - #Pack uint8 W_q, then run fused dequant matmul - packed_w = pack_2xint4(W_q) tt_out = triton_mixed_mm( - x, packed_w, scales, zeros, transposed=True, group_size=group_size, fp8_fast_accum=False, kernel_type=kernel_type + x, + packed_w, + scales.T, + zeros.T, + transposed=True, + group_size=group_size, + fp8_fast_accum=False, + kernel_type=kernel_type, ) + else: x = torch.randn(M, K, dtype=dtype, device="cuda") hqq_out = x @ W_dq.T - packed_w = pack_2xint4(W_q.T) tt_out = triton_mixed_mm( - x, packed_w, scales.T, zeros.T, transposed=False, group_size=group_size, fp8_fast_accum=False, kernel_type=kernel_type + x, + packed_w, + scales.T, + zeros.T, + transposed=False, + group_size=group_size, + fp8_fast_accum=False, + kernel_type=kernel_type, ) + assert check( + hqq_out, + tt_out, + max_diff=1e-2 if dtype == torch.bfloat16 else 1e-3, + verbose=True, + ) + - assert check(hqq_out, tt_out, max_diff=1e-2 if 
dtype == torch.bfloat16 else 1e-3) +# Only for debugging kernel without dependency on HQQ and with no autotuning +def _test_mixed_mm( + shape, + group_size, + BLOCK_M, + BLOCK_N, + BLOCK_K, + axis=1, + dtype=torch.float16, + transposed=True, + kernel_type="debug", + quant_dtype=torch.uint8, +): + qcfg = { + **BASE_QUANT_CONFIG, + **dict(group_size=group_size, axis=axis), + } + M, N, K = shape + + quant_config = BaseQuantizeConfig( + quant_zero=False, quant_scale=False, offload_meta=False, view_as_float=False + ) + quant_config.update({"weight_quant_params": qcfg}) + W_q = torch.randint(0, int(2**4), size=(N, K), dtype=quant_dtype, device="cuda") + + scales = torch.arange((N * K) // group_size, dtype=dtype, device="cuda")[:, None] + zeros = torch.zeros_like(scales) + W_dq = ((W_q.reshape(-1, group_size) - zeros) * scales).reshape(N, K) + scales = scales.reshape(N, -1) + zeros = zeros.reshape(N, -1) + + packed_w = pack_2xint4(W_q.T) + + if transposed: + x = torch.randn(M, N, dtype=dtype, device="cuda") + hqq_out = x @ W_dq + + tt_out = triton_mixed_mm( + x, + packed_w, + scales.T, + zeros.T, + transposed=True, + group_size=group_size, + fp8_fast_accum=False, + kernel_type=kernel_type, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_K=BLOCK_K, + ) + + else: + x = torch.randn(M, K, dtype=dtype, device="cuda") + hqq_out = x @ W_dq.T + + tt_out = triton_mixed_mm( + x, + packed_w, + scales.T, + zeros.T, + transposed=False, + group_size=group_size, + fp8_fast_accum=False, + kernel_type=kernel_type, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_K=BLOCK_K, + ) + msg = f"shape={shape}, group_size={group_size}, axis={axis}, dtype={dtype}, transposed={transposed}, kernel_type={kernel_type}, quant_dtype={quant_dtype}" + + check( + hqq_out, + tt_out, + msg=msg, + max_diff=1e-2 if dtype == torch.bfloat16 else 1e-3, + verbose=True, + ) + + +if __name__ == "__main__": + # _test_mixed_mm(transposed=False) + M, N, K = shape = [32, 128, 128] + BLOCK_M, BLOCK_N, BLOCK_K = shape + BLOCK_K = K // 2 + BLOCK_N = N // 2 + group_size = BLOCK_K + _test_mixed_mm( + shape, + group_size=group_size, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_K=BLOCK_K, + transposed=False, + ) + _test_mixed_mm( + shape, + group_size=group_size, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_K=BLOCK_K, + transposed=True, + ) diff --git a/torchao/prototype/hqq/README.md b/torchao/prototype/hqq/README.md index 22c40fd246..8bf1d34260 100644 --- a/torchao/prototype/hqq/README.md +++ b/torchao/prototype/hqq/README.md @@ -12,6 +12,53 @@ Tested and benchmarked for `HQQ` but could theoretically be used for any asymmet > **NOTE**: Benchmark below is only indicative of performance on consumer-grade `Ampere` GPUs (`A6000` specifically). When tested on `H100`, the performance is on par / marginally worse than native / compiled `torch`. > The intended use is thus for fine-tuning / training models on non-datacenter GPUs (`80 <= compute capability < 90`). If interested in optimizing the kernel for other architectures, please drop a note in the CUDA-MODE Discord channel. +### Usage + +Typical workflow: + +- quantize `float16 / bfloat16` weights to `s4 / u4` using a group-wise asymmetric quantization scheme, outputs are the quantized 4b weights stored as `torch.int8 / torch.uint8` +- pack weights using `pack_2xint4` such that 2 weights are packed per `torch.int8 / torch.uint8`. 
+- pass the packed weights, scales, and zeros to the kernel
+
+If running a transposed matmul (e.g., for backwards passes during training), there is no need to unpack / re-pack the weights; simply pass `transposed=True` to the kernel.
+
+The pseudocode below explains the expected shapes and dtypes. Also see `test/hqq/test_triton_mm.py` for a concrete example of usage with `HQQ`.
+
+```python
+
+#The reason we use N x K is to match the shape of the weight for a torch.nn.Linear layer, where N -> out-features, K -> in-features
+weights = torch.randn(N, K, dtype=torch.float16, device="cuda")
+
+#Perform groupwise asymmetric quantization along axis=1 (in-features). E.g., `scales = weights.reshape(-1, group_size).max(axis=1)`.
+#W_q holds `s4 / u4` values stored as dtype = torch.int8 / torch.uint8, shape N x K
+#scales and zeros are shape (N * K // group_size)
+W_q, scales, zeros = quantize(weights) #Choose your favorite quantization library
+
+#Pack pairs of int4 values (stored as int8 / uint8) into single int8 / uint8 values.
+#Note that we transpose W_q such that the packed shape is (K // 2) x N, and when unpacked K x N
+packed_w = pack_2xint4(W_q.T)
+
+#Reshape scales such that they can be broadcasted within the kernel
+scales = scales.reshape(N, -1)
+zeros = zeros.reshape(N, -1)
+
+#Sample input
+x = torch.randn(M, K, dtype=torch.float16, device="cuda")
+
+#Run the fused dequant matmul.
+#If running the transposed case, such as for a backwards pass,
+#switch transposed to True.
+tt_out = triton_mixed_mm(
+    x,
+    packed_w,
+    scales.T,
+    zeros.T,
+    transposed=False,
+    group_size=group_size,
+    fp8_fast_accum=False,
+    )
+```
+
 ### Implementation Details

 - Bitpacking is simple row interleave, no need for extensive preprocessing (e.g., `tinygemm` or `fastertransformer`)
diff --git a/torchao/prototype/hqq/kernels.py b/torchao/prototype/hqq/kernels.py
index 077fc94108..8409fcb68b 100644
--- a/torchao/prototype/hqq/kernels.py
+++ b/torchao/prototype/hqq/kernels.py
@@ -1,8 +1,8 @@
-from triton import Config
-import triton.language as tl
 import triton
+import triton.language as tl
+from triton import Config

-#TODO: add early config prune and estimate_matmul_time to reduce autotuning time
+# TODO: add early config prune and estimate_matmul_time to reduce autotuning time
 # from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time


@@ -141,22 +141,24 @@ def get_configs_compute_bound():
     return configs


-
 def init_to_zero(name):
     return lambda nargs: nargs[name].zero_()


 MIXED_MM_HEURISTICS = {
     "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
-    "BLOCK_K": lambda args: min(args["BLOCK_K"], args["QGROUP_SIZE"]) if not args["TRANSPOSED"] else args["BLOCK_K"],
-    "BLOCK_N": lambda args: min(args["BLOCK_N"], args["QGROUP_SIZE"]) if args["TRANSPOSED"] else args["BLOCK_N"],
+    "BLOCK_K": lambda args: min(args["BLOCK_K"], args["QGROUP_SIZE"])
+    if not args["TRANSPOSED"]
+    else args["BLOCK_K"],
+    "BLOCK_N": lambda args: min(args["BLOCK_N"], args["QGROUP_SIZE"])
+    if args["TRANSPOSED"]
+    else args["BLOCK_N"],
     "SPLIT_K": lambda args: 1
     if args["IS_BFLOAT16"]
     else args["SPLIT_K"],  # atomic add not supported for bfloat16
 }


 @triton.jit
 def _mixed_mm_kernel(
     # Operands
@@ -181,17 +183,19 @@ def _mixed_mm_kernel(
     # Meta-params
     IS_BFLOAT16: tl.constexpr,
     QGROUP_SIZE: tl.constexpr,
-    BLOCK_M: tl.constexpr,  # = 32,
-    BLOCK_N: tl.constexpr,  # = 32,
-    BLOCK_K: tl.constexpr,  # = 16,  #
-    SPLIT_K: tl.constexpr,  # = 1,
-    EVEN_K: tl.constexpr,  # = True,
+    BLOCK_M: tl.constexpr,
+    BLOCK_N: tl.constexpr,
+    BLOCK_K: tl.constexpr,
+    SPLIT_K: tl.constexpr,
+    EVEN_K: 
tl.constexpr, TRANSPOSED: tl.constexpr = False, - GROUP_M: tl.constexpr = 8, # 32, + GROUP_M: tl.constexpr = 8, # tl.dot options acc_dtype: tl.constexpr = tl.float32, input_precision: tl.constexpr = "ieee", fp8_fast_accum: tl.constexpr = False, + # Only used for debugging + DEBUG: tl.constexpr = False, ): """Mixed matmul kernel @@ -203,16 +207,29 @@ def _mixed_mm_kernel( QGROUP_SIZE should be a multiple of BLOCK_K such that a vector of scales / zeros is loaded and broadcasted to block shape per mainloop iteration. + In the transposed case, A is M x N and B is K x N, and we reduce along "N": + - TLDR: we are loading rows of A and B blocks at a time, dequantizing and transposing each block of B to achieve the overall + effect of a transposed matmul. This is necessary to perform a transposed matmul without unpacking and repacking the B matrix. + - Indexing remains the same for A (the reduction dim (BLK_K / K) corresponds to axis 1 of A -- "N" above) + - We load a BLK_M x BLK_K block of A + - Indexing for B is now flipped: N <-> K + - We load BLK_N x BLK_K block of B (remembering that the reduction dimension is axis 1 of B) + - We dequantize and transpose to BLK_K x BLK_N + - scale / zero indexing also change, since we are now iterating along the non-grouping dim within the mac loop and along + the grouping dim across blocks. + - Each mac loop calculates BLK_M x BLK_N -> M x "N"(= K) + - Within the mac loop for each block, we iterate along axis=1 for **both** A and B since axis = 1 is now the reduction dim for B. + NOTE: Assumes that the quantization grouping was done along the K dimension originally (i.e., QGROUP_SIZE consecutive elements of original weight matrix in the K dimension were grouped together when calculating min / max scaling factors). """ - # tl.static_assert(B.dtype.element_ty == tl.int8 or B.dtype.element_ty == tl.uint8) if not TRANSPOSED: tl.static_assert(QGROUP_SIZE % BLOCK_K == 0) else: tl.static_assert(QGROUP_SIZE % BLOCK_N == 0) - + + # TODO: refactor swizzling to separate function # Threadblock swizzling pid = tl.program_id(0) pid_z = tl.program_id(1) @@ -227,38 +244,54 @@ def _mixed_mm_kernel( pid_n = (pid % width) // group_size rm = (pid_m * BLOCK_M + tl.arange(0, BLOCK_M)) % M - rn = (pid_n * BLOCK_N + tl.arange(0, BLOCK_N)) % N - ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) - rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) - + if not DEBUG: + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + else: + ram = rm + # rn = (pid_n * BLOCK_N + tl.arange(0, BLOCK_N)) % N + # rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) rak = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) # BLOCK_K for b is effectively BLOCK_K // 2 - rbk = pid_z * BLOCK_K // 2 + tl.arange(0, BLOCK_K // 2) + if not TRANSPOSED: + rn = (pid_n * BLOCK_N + tl.arange(0, BLOCK_N)) % N + if not DEBUG: + rbn = tl.max_contiguous( + tl.multiple_of(rn % N, BLOCK_N), BLOCK_N + ) # rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + else: + rbn = rn + rbk = pid_z * BLOCK_K // 2 + tl.arange(0, BLOCK_K // 2) + else: + rn = (pid_n * BLOCK_N // 2 + tl.arange(0, BLOCK_N // 2)) % N + if not DEBUG: + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N // 2), BLOCK_N // 2) + else: + rbn = rn + rbk = rak A = A + (ram[:, None] * stride_am + rak[None, :] * stride_ak) - B = B + (rbk[:, None] * stride_bk + rbn[None, :] * stride_bn) - - #In the forward pass, we have a K x N matrix - #In the transposed (backward) pass, we have an N x K matrix, where 
N and K refer to the how the weight was originally quantized - #note that N refers to offsets_scale_k and K refers to offsets_scale_n when it comes to the gemm indexing logic below - - #Grouping is along K, so in the forward pass, each block loads a row vector of BLK_K x BLK_N - #where grouping varies along N, hence the mainloop marches down the K dimension, where - #group idx is given by K // QGROUP_SIZE - - # For the transposed case, we load a column vector of BLK_N x BLK_K - # we march down the N dimension during the mainloop ("K" in gemm) - # Hence blocks now load K // QGROUP_SIZE along pid_n (slow varying) - # while each block now loads column vector of groups along "K" gemm dim on each main loop iteration - # scale offsets is thus a single idx along "N" and range along "K" for the transposed case - + if not TRANSPOSED: - # scale_offset_n = pid_n * stride_scale_n * BLOCK_N - offsets_scale_n = pid_n * stride_scale_n * BLOCK_N + tl.arange(0, BLOCK_N) * stride_scale_n + B = B + (rbk[:, None] * stride_bk + rbn[None, :] * stride_bn) else: - offsets_scale_n = pid_n * stride_scale_n * BLOCK_N // QGROUP_SIZE - + # Note: in the transposed case, we are loading BLK_N x BLK_K, but we need to transpose to BLK_K x BLK_N + # the strides are adjusted accordingly, since we to stride by stride_bk to get rows of BLK_N + # and stride_bn to get columns of BLK_K + B = B + (rbn[:, None] * stride_bk + rbk[None, :] * stride_bn) + + # Grouping is along K, so in the forward pass, each block loads a row vector of BLK_K x BLK_N + # where grouping varies along N, hence the mainloop marches down the K dimension, where + # group idx is given by K // QGROUP_SIZE + + if not TRANSPOSED: + offsets_scale_n = ( + pid_n * stride_scale_n * BLOCK_N + tl.arange(0, BLOCK_N) * stride_scale_n + ) + else: + scale_offset_k = pid_n * BLOCK_N * stride_scale_k // QGROUP_SIZE + offsets_scale_n = tl.arange(0, BLOCK_K) * stride_scale_n + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=acc_dtype) for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): if EVEN_K: @@ -266,7 +299,12 @@ def _mixed_mm_kernel( qb = tl.load(B) else: k_remaining_a = K - k * (BLOCK_K * SPLIT_K) - k_remaining_b = K - k * (BLOCK_K * SPLIT_K) // 2 # Note the division by 2 + if not TRANSPOSED: + k_remaining_b = ( + K - k * (BLOCK_K * SPLIT_K) // 2 + ) # Note the division by 2 + else: + k_remaining_b = K - k * (BLOCK_K * SPLIT_K) # = k_remaining_a _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty) a = tl.load(A, mask=rak[None, :] < k_remaining_a, other=_0) @@ -275,8 +313,10 @@ def _mixed_mm_kernel( if not TRANSPOSED: scale_offset_k = k * BLOCK_K * SPLIT_K * stride_scale_k // QGROUP_SIZE else: - scale_offset_k = k * BLOCK_K * SPLIT_K * stride_scale_k + tl.arange(0, BLOCK_K) * stride_scale_k - + offsets_scale_n = ( + k * stride_scale_n * BLOCK_K + tl.arange(0, BLOCK_K) * stride_scale_n + ) + scales = tl.load(scales_ptr + offsets_scale_n + scale_offset_k) zeros = tl.load(zeros_ptr + offsets_scale_n + scale_offset_k) @@ -287,38 +327,38 @@ def _mixed_mm_kernel( # Upcast to fp16 # TODO: better bfloat16 conversion? 
compilation error if direct conversion from int8 to bfloat16 - if IS_BFLOAT16: + if IS_BFLOAT16: dq_b = ( tl.join( qb_lo.to(tl.float16).to(A.dtype.element_ty), qb_hi.to(tl.float16).to(A.dtype.element_ty), - ) - .permute(0, 2, 1) - .reshape(BLOCK_K, BLOCK_N) + ).permute(0, 2, 1) + # .reshape(BLOCK_K, BLOCK_N) ) else: dq_b = ( tl.join( qb_lo.to(A.dtype.element_ty), qb_hi.to(A.dtype.element_ty), - ) - .permute(0, 2, 1) - .reshape(BLOCK_K, BLOCK_N) + ).permute(0, 2, 1) + # .reshape(BLOCK_K, BLOCK_N) ) + if not TRANSPOSED: + dq_b = dq_b.reshape(BLOCK_K, BLOCK_N) + else: + dq_b = dq_b.reshape(BLOCK_N, BLOCK_K) # Scale upcasted weights # Note that we broadcast the scales --> the assumption is that all scales fall within a single QGROUP # This condition is statically check (see assertions above) - if not TRANSPOSED: - zeros = zeros[None, :] - scales = scales[None, :] - else: - zeros = zeros[:, None] - scales = scales[:, None] - + + zeros = zeros[None, :] + scales = scales[None, :] + dq_b = (dq_b - zeros) * scales - # dq_b = (dq_b - zeros[None, :]) * scales[None, :] + if TRANSPOSED: + dq_b = tl.trans(dq_b) if fp8_fast_accum: acc = tl.dot( @@ -327,9 +367,13 @@ def _mixed_mm_kernel( else: acc += tl.dot(a, dq_b, out_dtype=acc_dtype, input_precision=input_precision) A += BLOCK_K * SPLIT_K * stride_ak - # Advance by half the block size, since each block is unpacked and upcasted into two fp16 values - B += BLOCK_K * SPLIT_K * stride_bk // 2 + # Advance by half the block size, since each block is unpacked and upcasted into two fp16 values + if not TRANSPOSED: + B += BLOCK_K * SPLIT_K * stride_bk // 2 + else: + # we iterating across a row of B (non-packing dim, hence no need for div 2) + B += BLOCK_K * SPLIT_K * stride_bn acc = acc.to(C.dtype.element_ty) offs_cm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) @@ -344,5 +388,10 @@ def _mixed_mm_kernel( _mixed_mm = triton.heuristics(MIXED_MM_HEURISTICS)(_mixed_mm_kernel) -mixed_mm_kernel_max_autotune = triton.autotune(configs=get_configs_compute_bound() + get_configs_io_bound(), key=["M", "N", "K"])(_mixed_mm) -mixed_mm_kernel_compute_bound = triton.autotune(configs=get_configs_compute_bound(), key=["M", "N", "K"])(_mixed_mm) +mixed_mm_kernel_max_autotune = triton.autotune( + configs=get_configs_compute_bound() + get_configs_io_bound(), key=["M", "N", "K"] +)(_mixed_mm) +mixed_mm_kernel_compute_bound = triton.autotune( + configs=get_configs_compute_bound(), key=["M", "N", "K"] +)(_mixed_mm) +_mixed_mm_debug = _mixed_mm \ No newline at end of file diff --git a/torchao/prototype/hqq/mixed_mm.py b/torchao/prototype/hqq/mixed_mm.py index e3ccaeb46e..6a933fa98c 100644 --- a/torchao/prototype/hqq/mixed_mm.py +++ b/torchao/prototype/hqq/mixed_mm.py @@ -1,10 +1,15 @@ import torch -from triton import cdiv import triton.language as tl -from .kernels import mixed_mm_kernel_compute_bound, mixed_mm_kernel_max_autotune +from triton import cdiv + +from .kernels import ( + mixed_mm_kernel_compute_bound, + mixed_mm_kernel_max_autotune, +) -#h/t jlebar for the bit packing / unpacking logic (source: Triton Slack thread) -#https://gist.github.com/jlebar/3435b2c00deea53258887ce37231e5e2 + +# h/t jlebar for the bit packing / unpacking logic (source: Triton Slack thread) +# https://gist.github.com/jlebar/3435b2c00deea53258887ce37231e5e2 def pack_2xint4(t): """ The packing format is such that consecutive rows are packed into a lower / upper bits @@ -27,6 +32,7 @@ def pack_2xint4(t): t = t.reshape(t.shape[0] // 2, 2, t.shape[1]).permute(1, 0, 2) return (t[0] & 0xF) | (t[1] << 4) + def 
triton_mixed_mm( a, b, @@ -38,7 +44,30 @@ def triton_mixed_mm( input_precision="ieee", fp8_fast_accum=False, kernel_type="compute_bound", + # For debugging only + BLOCK_M=None, + BLOCK_N=None, + BLOCK_K=None, ): + """Run fused int4 / fp16 dequant GEMM + + Args: + a (torch.Tensor): M x K if not transposed, M x N if transposed + b (torch.Tensor): (K // 2) x N, packed such that 2 int4's are packed into 1 uint8 (see pack_2xint4) + scales (torch.Tensor): (num_groups x N), where num_groups = (N * K / group_size) + zeros (torch.Tensor): same shape as scales + group_size (torch.Tensor): size of group in groupwise quantization -- MUST be along axis 1 of an N x K matrix + transposed (bool, optional): Whether to run a transposed matmul where shapes are (M x N) x (K x N) => (M x K) + acc_dtype (_type_, optional): dtype of accumulator. Defaults to None, which corresponds to tl.float32. + input_precision (str, optional): Only relevant when dtype of a is torch.float32. Defaults to "ieee". + kernel_type (str, optional): Type of autoconfig to use. Either "max_autotune" or "compute_bound". + BLOCK_M (int, optional): Only for debugging. Defaults to None. + BLOCK_N (int, optional): Only for debugging. Defaults to None. + BLOCK_K (int, optional): Only for debugging. Defaults to None. + + Returns: + c (torch.Tensor): M x N + """ device = a.device # handle non-contiguous inputs if necessary if a.stride(0) > 1 and a.stride(1) > 1: @@ -46,14 +75,18 @@ def triton_mixed_mm( if b.stride(0) > 1 and b.stride(1) > 1: b = b.contiguous() # checks constraints - assert a.shape[1] == b.shape[0] * 2, "incompatible dimensions" + if not transposed: + assert a.shape[1] == b.shape[0] * 2, "incompatible dimensions" + assert b.dtype == torch.int8 or b.dtype == torch.uint8, "b must be int8 or uint8" assert scales.ndim == 2 - assert kernel_type in ["max_autotune", "compute_bound"] - + if transposed: + assert ( + a.shape[1] == b.shape[1] + ), "transpose requires (M x N) x (K x N), where reduction dim is N" + M, K = a.shape - _, N = b.shape - # N = b.shape[1] if not transposed else b.shape[0] + N = b.shape[1] if not transposed else b.shape[0] * 2 # assert scales.shape[1] == N if not transposed else scales.shape[0] == N # assert scales.shape[0] == K // group_size if not transposed else scales.shape[1] == K // group_size assert scales.dtype == a.dtype @@ -72,10 +105,15 @@ def triton_mixed_mm( if kernel_type == "max_autotune": kernel = mixed_mm_kernel_max_autotune - else: + elif kernel_type == "compute_bound": kernel = mixed_mm_kernel_compute_bound - - kernel[grid]( + else: + from .kernels import _mixed_mm_debug + + kernel = _mixed_mm_debug + + if kernel_type == "max_autotune" or kernel_type == "compute_bound": + kernel[grid]( a, b, scales, @@ -99,4 +137,38 @@ def triton_mixed_mm( input_precision=input_precision, fp8_fast_accum=fp8_fast_accum, ) - return c + else: + assert all([BLOCK_M is not None, BLOCK_N is not None, BLOCK_K is not None]) + grid = (M // BLOCK_M * N // BLOCK_N, 1, 1) + kernel[grid]( + a, + b, + scales, + zeros, + c, + M, + N, + K, # + a.stride(0), + a.stride(1), # + b.stride(0), + b.stride(1), # + c.stride(0), + c.stride(1), + scales.stride(0), + scales.stride(1), + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_K=BLOCK_K, + SPLIT_K=1, + EVEN_K=True, + TRANSPOSED=transposed, + IS_BFLOAT16=a.dtype == torch.bfloat16, + QGROUP_SIZE=group_size, + acc_dtype=acc_dtype, + input_precision=input_precision, + fp8_fast_accum=fp8_fast_accum, + DEBUG=True, + ) + + return c \ No newline at end of file From 
1b8a0a896bb6bcc0608f85c4764f67effaa65efd Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Fri, 3 May 2024 17:10:07 -0700
Subject: [PATCH 12/61] Add test for torch.export.export (#213)

Summary:
att

Test Plan:
python test/integration/test_integration.py -k TestExport

Reviewers:

Subscribers:

Tasks:

Tags:

Co-authored-by: Mark Saroufim
---
 test/integration/test_integration.py | 54 +++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 5 deletions(-)

diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py
index 87efb3962a..0d11093fd1 100644
--- a/test/integration/test_integration.py
+++ b/test/integration/test_integration.py
@@ -1428,6 +1428,7 @@ class TestAOTI(unittest.TestCase):
     @parameterized.expand(
         list(itertools.product(TENSOR_SUBCLASS_APIS, COMMON_DEVICES, COMMON_DTYPES)),
     )
+    @run_supported_device_dtype
    def test_aoti(self, api, test_device, test_dtype):
        if not TORCH_VERSION_AFTER_2_4:
            self.skipTest("aoti compatibility requires 2.4+.")
@@ -1442,11 +1443,6 @@ def test_aoti(self, api, test_device, test_dtype):
        if test_dtype != torch.bfloat16:
            self.skipTest(f"{api} in {test_dtype} is not supported for aoti compilation yet")

-        if test_device == "cuda" and not torch.cuda.is_available():
-            self.skipTest(f"Need CUDA available.")
-        if test_device == "cuda" and torch.cuda.is_available() and test_dtype == torch.bfloat16 and torch.cuda.get_device_capability() < (8, 0):
-            self.skipTest("Need CUDA and SM80+ available.")
-
        m, k, n = 32, 64, 32

        class test_model(nn.Module):
@@ -1479,5 +1475,53 @@ def forward(self, x):
            torch._export.aot_compile(model, example_inputs)


+class TestExport(unittest.TestCase):
+    @parameterized.expand(
+        list(itertools.product(TENSOR_SUBCLASS_APIS, COMMON_DEVICES, COMMON_DTYPES)),
+    )
+    @run_supported_device_dtype
+    def test_export(self, api, test_device, test_dtype):
+        if not TORCH_VERSION_AFTER_2_4:
+            self.skipTest("export compatibility requires 2.4+.")
+
+        logger.info(f"TestExport: {api}, {test_device}, {test_dtype}")
+
+        if test_dtype != torch.bfloat16:
+            self.skipTest(f"{api} in {test_dtype} is not supported for export yet")
+
+        m, k, n = 32, 64, 32
+
+        class test_model(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.lin1 = nn.Linear(k, n)
+                self.relu = nn.ReLU()
+                self.lin2 = nn.Linear(n, n)
+
+            def forward(self, x):
+                x = self.lin1(x)
+                x = self.relu(x)
+                x = self.lin2(x)
+                return x
+
+        x = torch.randn(m, k, dtype=test_dtype, device=test_device)
+
+        # get float reference
+        model = test_model().to(dtype=test_dtype, device=test_device).eval()
+        ref_f = model(x)
+
+        kwargs = {"dtype": test_dtype}
+        api(model, **kwargs)
+
+        # running model
+        ref = model(x)
+
+        # make sure it exports and produces the same results
+        example_inputs = (x,)
+        model = torch.export.export(model, example_inputs).module()
+        after_export = model(x)
+        self.assertTrue(torch.equal(after_export, ref))
+
+
 if __name__ == "__main__":
     unittest.main()

From 2dc57a8f43793c31bcf1c5292196cda89b676f91 Mon Sep 17 00:00:00 2001
From: andrewor14
Date: Sat, 4 May 2024 11:40:54 -0400
Subject: [PATCH 13/61] Add option to disable fake quant for 8da4w QAT (#198)

Summary: This feature helps with model convergence during QAT.
The user can disable observation/fake quant for the first N steps
and re-enable them later, allowing the activation and weight values
to stabilize before applying quantization.
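For example, training might toggle fake quant as follows (a minimal sketch
using the helpers added in this patch; the model and the train() loop are
placeholders):

```python
from torchao.quantization.prototype.qat import (
    Int8DynActInt4WeightQATQuantizer,
    disable_8da4w_fake_quant,
    enable_8da4w_fake_quant,
)

quantizer = Int8DynActInt4WeightQATQuantizer(groupsize=16)
model = quantizer.prepare(float_model)  # float_model is a placeholder

# Train the first N steps without observation / fake quant
model.apply(disable_8da4w_fake_quant)
train(model, num_steps=N)  # hypothetical training loop

# Re-enable fake quant for the remaining steps
model.apply(enable_8da4w_fake_quant)
train(model, num_steps=remaining_steps)  # hypothetical training loop
```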
Test Plan:
python test/quantization/test_qat.py -k test_qat_8da4w_quantizer_disable_fake_quant
python test/quantization/test_qat.py -k test_qat_8da4w_quantizer_disable_fake_quant_backward

Reviewers: jerryzh168, cpuhrsch

Subscribers: jerryzh168, cpuhrsch, supriyar
---
 test/quantization/test_qat.py         | 98 +++++++++++++++++++++++++++
 torchao/quantization/prototype/qat.py | 67 ++++++++++++------
 2 files changed, 145 insertions(+), 20 deletions(-)

diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py
index 031e5ef14d..3a12d9b636 100644
--- a/test/quantization/test_qat.py
+++ b/test/quantization/test_qat.py
@@ -200,6 +200,104 @@ def test_qat_8da4w_quantizer(self):
         for k in ptq_state_dict.keys():
             torch.testing.assert_close(ptq_state_dict[k], converted_state_dict[k], atol=0, rtol=0)

+    @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch version is 2.3 or lower")
+    def test_qat_8da4w_quantizer_disable_fake_quant(self):
+        """
+        Test that 8da4w QAT with disabled fake quant matches nn.Linear in forward.
+        """
+        from torchao.quantization.prototype.qat import (
+            Int8DynActInt4WeightQATQuantizer,
+            disable_8da4w_fake_quant,
+            enable_8da4w_fake_quant,
+        )
+
+        group_size = 16
+        torch.manual_seed(self.SEED)
+        m = M()
+        m2 = copy.deepcopy(m)
+        m3 = copy.deepcopy(m)
+        quantizer = Int8DynActInt4WeightQATQuantizer(groupsize=group_size)
+        qat_model = quantizer.prepare(m)
+        qat_model.apply(disable_8da4w_fake_quant)
+        self.assertFalse(qat_model.linear1._fake_quant_enabled)
+        self.assertFalse(qat_model.linear2._fake_quant_enabled)
+        self.assertFalse(qat_model.sub.linear._fake_quant_enabled)
+
+        # Disabled fake quant is just a normal linear
+        m2.linear1.weight = qat_model.linear1.weight
+        m2.linear2.weight = qat_model.linear2.weight
+        m2.sub.linear.weight = qat_model.sub.linear.weight
+        torch.manual_seed(self.SEED)
+        x = m.example_inputs()
+        x2 = copy.deepcopy(x)
+        qat_out = qat_model(*x)
+        nn_out = m2(*x2)
+        torch.testing.assert_close(nn_out, qat_out, atol=0, rtol=0)
+
+        # Re-enable fake quant
+        qat_model.apply(enable_8da4w_fake_quant)
+        self.assertTrue(qat_model.linear1._fake_quant_enabled)
+        self.assertTrue(qat_model.linear2._fake_quant_enabled)
+        self.assertTrue(qat_model.sub.linear._fake_quant_enabled)
+
+        # Fake quant should be applied as normal
+        quantizer2 = Int8DynActInt4WeightQATQuantizer(groupsize=group_size)
+        qat_model2 = quantizer2.prepare(m3)
+        qat_model2.linear1.weight = qat_model.linear1.weight
+        qat_model2.linear2.weight = qat_model.linear2.weight
+        qat_model2.sub.linear.weight = qat_model.sub.linear.weight
+        torch.manual_seed(self.SEED)
+        x = m.example_inputs()
+        x2 = copy.deepcopy(x)
+        qat_out = qat_model(*x)
+        qat_out2 = qat_model2(*x2)
+        torch.testing.assert_close(qat_out, qat_out2, atol=0, rtol=0)
+
+    @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch version is 2.3 or lower")
+    def test_qat_8da4w_quantizer_disable_fake_quant_backward(self):
+        """
+        Test that 8da4w QAT with disabled fake quant matches nn.Linear in backward.
+ """ + from torchao.quantization.prototype.qat import ( + Int8DynActInt4WeightQATQuantizer, + disable_8da4w_fake_quant, + ) + + group_size = 16 + torch.manual_seed(self.SEED) + m = M() + nn_model = copy.deepcopy(m) + quantizer = Int8DynActInt4WeightQATQuantizer(groupsize=group_size) + qat_model = quantizer.prepare(m) + qat_model.apply(disable_8da4w_fake_quant) + nn_model.linear1.weight = qat_model.linear1.weight + nn_model.linear2.weight = qat_model.linear2.weight + nn_model.sub.linear.weight = qat_model.sub.linear.weight + + # Simulate training for both models + optimizer1 = torch.optim.SGD(nn_model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-5) + optimizer2 = torch.optim.SGD(qat_model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-5) + loss_fn1 = torch.nn.CrossEntropyLoss() + loss_fn2 = torch.nn.CrossEntropyLoss() + example_inputs = nn_model.example_inputs() + target = torch.randn(1, 64).float() + output1 = nn_model(*example_inputs) + output2 = qat_model(*example_inputs) + torch.testing.assert_close(output1, output2, atol=0, rtol=0) + loss1 = loss_fn1(output1, target) + loss2 = loss_fn2(output2, target) + optimizer1.zero_grad() + optimizer2.zero_grad() + loss1.backward() + loss2.backward() + optimizer1.step() + optimizer2.step() + + # After 1 training step, weights should match exactly + torch.testing.assert_close(nn_model.linear1.weight, qat_model.linear1.weight, atol=0, rtol=0) + torch.testing.assert_close(nn_model.linear2.weight, qat_model.linear2.weight, atol=0, rtol=0) + torch.testing.assert_close(nn_model.sub.linear.weight, qat_model.sub.linear.weight, atol=0, rtol=0) + if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/prototype/qat.py b/torchao/quantization/prototype/qat.py index 4271d68e9b..7901fa8b5b 100644 --- a/torchao/quantization/prototype/qat.py +++ b/torchao/quantization/prototype/qat.py @@ -4,7 +4,7 @@ # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. 
-from typing import Any, Tuple +from typing import Any, Optional, Tuple import torch from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib @@ -129,30 +129,43 @@ def __init__( self.groupsize = groupsize self.precision = precision self.scales_precision = scales_precision + self._fake_quant_enabled = True + + def enable_fake_quant(self, enabled: bool = True): + self._fake_quant_enabled = enabled + + def disable_fake_quant(self): + self.enable_fake_quant(False) def forward(self, x: torch.Tensor) -> torch.Tensor: # activations: int8 dynamic asymmetric quant - (act_qmin, act_qmax) = self._get_qmin_qmax(8) - (act_scales, act_zp) = _choose_qparams_per_token_asymmetric( - x, torch.int8, # dtype not used - ) - x_fq = fake_quantize_per_token( - x, act_scales, act_zp, act_qmin, act_qmax, - ) + if self._fake_quant_enabled: + (act_scales, act_zp) =_choose_qparams_per_token_asymmetric( + x, torch.int8, # dtype not used + ) + (act_qmin, act_qmax) = self._get_qmin_qmax(8) + x_fq = fake_quantize_per_token( + x, act_scales, act_zp, act_qmin, act_qmax, + ) + else: + x_fq = x # weights: int4 grouped per channel symmetric quant - (weight_qmin, weight_qmax) = self._get_qmin_qmax(4) - (weight_scales, weight_zp) = get_group_qparams_symmetric( - self.weight, 4, self.groupsize, self.scales_precision, - ) - w_fq = fake_quantize_per_channel_group( - self.weight, - weight_scales, - weight_zp, - weight_qmin, - weight_qmax, - self.groupsize, - ) + if self._fake_quant_enabled: + (weight_scales, weight_zp) = get_group_qparams_symmetric( + self.weight, 4, self.groupsize, self.scales_precision, + ) + (weight_qmin, weight_qmax) = self._get_qmin_qmax(4) + w_fq = fake_quantize_per_channel_group( + self.weight, + weight_scales, + weight_zp, + weight_qmin, + weight_qmax, + self.groupsize, + ) + else: + w_fq = self.weight return torch.nn.functional.linear(x_fq, w_fq) # TODO: move this to common util @@ -161,6 +174,20 @@ def _get_qmin_qmax(self, n_bit: int): qmax = 2 ** (n_bit - 1) - 1 return (qmin, qmax) + def enable_8da4w_fake_quant(mod: torch.nn.Module): + """ + Enable fake quantization for `Int8DynActInt4WeightQATLinear`. + """ + if isinstance(mod, Int8DynActInt4WeightQATLinear): + mod.enable_fake_quant() + + def disable_8da4w_fake_quant(mod: torch.nn.Module): + """ + Disable fake quantization for `Int8DynActInt4WeightQATLinear`. 
+ """ + if isinstance(mod, Int8DynActInt4WeightQATLinear): + mod.disable_fake_quant() + # ======================== # | QUANT PRIMITIVES | From dfecf20ecb3ba6b16ce9e836e0d5b57d3f3fda0d Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Sat, 4 May 2024 15:28:06 -0700 Subject: [PATCH 14/61] Update Regression Test to Unpinned Nightly (#215) --- .github/workflows/regression_test.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/regression_test.yml b/.github/workflows/regression_test.yml index cc72c57512..a779f6cf0d 100644 --- a/.github/workflows/regression_test.yml +++ b/.github/workflows/regression_test.yml @@ -31,9 +31,9 @@ jobs: torch-spec: 'torch==2.3.0' gpu-arch-type: "cuda" gpu-arch-version: "12.1" - - name: CUDA 2.4.0.dev20240428 + - name: CUDA Nightly runs-on: linux.g5.12xlarge.nvidia.gpu - torch-spec: '--pre torch==2.4.0.dev20240428+cu121 --index-url https://download.pytorch.org/whl/nightly/cu121' + torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cu121' gpu-arch-type: "cuda" gpu-arch-version: "12.1" - name: CPU 2.2.2 @@ -46,7 +46,7 @@ jobs: torch-spec: 'torch==2.3.0 --index-url https://download.pytorch.org/whl/cpu' gpu-arch-type: "cpu" gpu-arch-version: "" - - name: Nightly CPU + - name: CPU Nightly runs-on: linux.4xlarge torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cpu' gpu-arch-type: "cpu" From 58b0899f7a9a0b53a10947ce465f6025b82f5d59 Mon Sep 17 00:00:00 2001 From: Keren Zhou Date: Sun, 5 May 2024 23:53:19 -0400 Subject: [PATCH 15/61] Fix the URLs of web pages (#217) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8f1c805100..80f4a932d5 100644 --- a/README.md +++ b/README.md @@ -123,9 +123,9 @@ torchao has been integrated with other libraries including ## Success stories Our kernels have been used to achieve SOTA inference performance on -* Image segmentation models with [sam-fast](pytorch.org/blog/accelerating-generative-ai) -* Language models with [gpt-fast](pytorch.org/blog/accelerating-generative-ai-2) -* Diffusion models with [sd-fast](pytorch.org/blog/accelerating-generative-ai-3) +* Image segmentation models with [sam-fast](https://pytorch.org/blog/accelerating-generative-ai) +* Language models with [gpt-fast](https://pytorch.org/blog/accelerating-generative-ai-2) +* Diffusion models with [sd-fast](https://pytorch.org/blog/accelerating-generative-ai-3) ## License From ce78e79c0aacdd68ffa1cfd69590d2c3f8a21b8e Mon Sep 17 00:00:00 2001 From: andrewor14 Date: Mon, 6 May 2024 12:32:31 -0400 Subject: [PATCH 16/61] Copy weights and preserve device for 8da4w QAT linear (#211) * Copy weights and preserve device for 8da4w QAT linear Summary: This fixes two correctness bugs. First, we never copied over the weights from the existing linear, so we would start from random weights even when loading from checkpoints. Second, we never preserved the device of the original linear. This is important for settings like FSDP, where we expect non-zero ranks to have their parameters on the meta device in order to initialize these parameters correctly. 
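For example, under FSDP the flow this fixes looks roughly like the following
(a minimal sketch; the model class is a placeholder):

```python
import torch
from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATQuantizer

# Non-zero ranks initialize their parameters on the meta device
with torch.device("meta"):
    model = MyTransformer()  # placeholder model definition

quantizer = Int8DynActInt4WeightQATQuantizer(groupsize=256)
model = quantizer.prepare(model)
# The QAT linears keep the original (meta) device and skip the weight copy,
# so FSDP can materialize and initialize them correctly afterwards
```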
Test Plan:
python test/quantization/test_qat.py -k test_qat_8da4w_quantizer
python test/quantization/test_qat.py -k test_qat_8da4w_quantizer_meta_weights

Reviewers: jerryzh168, cpuhrsch

Subscribers: jerryzh168, cpuhrsch, supriyar

* Update test_qat.py
---
 test/quantization/test_qat.py         | 23 ++++++++++++-----------
 torchao/quantization/GPTQ.py          | 27 ++++++++++++++++-----------
 torchao/quantization/prototype/qat.py |  4 +++-
 3 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py
index 3a12d9b636..a0587d3ff0 100644
--- a/test/quantization/test_qat.py
+++ b/test/quantization/test_qat.py
@@ -169,17 +169,6 @@ def test_qat_8da4w_quantizer(self):
         qat_model = qat_quantizer.prepare(m)
         ptq_model = ptq_quantizer.quantize(m2)

-        # Force the weights to be the same
-        self._set_ptq_weight(
-            ptq_model.linear1, qat_model.linear1.weight, group_size,
-        )
-        self._set_ptq_weight(
-            ptq_model.sub.linear, qat_model.sub.linear.weight, group_size,
-        )
-        self._set_ptq_weight(
-            ptq_model.linear2, qat_model.linear2.weight, group_size,
-        )
-
         # Compare model values
         torch.manual_seed(self.SEED)
         x = m.example_inputs()
@@ -200,6 +189,18 @@ def test_qat_8da4w_quantizer(self):
         for k in ptq_state_dict.keys():
             torch.testing.assert_close(ptq_state_dict[k], converted_state_dict[k], atol=0, rtol=0)

+    @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch version is 2.3 or lower")
+    def test_qat_8da4w_quantizer_meta_weights(self):
+        from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATQuantizer
+
+        with torch.device("meta"):
+            m = M()
+        self.assertTrue(all(v.is_meta for v in m.state_dict().values()))
+        group_size = 16
+        qat_quantizer = Int8DynActInt4WeightQATQuantizer(groupsize=group_size)
+        qat_model = qat_quantizer.prepare(m)
+        self.assertTrue(all(v.is_meta for v in qat_model.state_dict().values()))
+
     @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch version is 2.3 or lower")
     def test_qat_8da4w_quantizer_disable_fake_quant(self):
         """
diff --git a/torchao/quantization/GPTQ.py b/torchao/quantization/GPTQ.py
index 3e007ec75b..e7176b4fd2 100644
--- a/torchao/quantization/GPTQ.py
+++ b/torchao/quantization/GPTQ.py
@@ -1127,22 +1127,26 @@ def _replace_linear_8da4w(
     precision: torch.dtype,
     scales_precision: torch.dtype,
     linear_class: Type[torch.nn.Module],
+    copy_weights: bool = False,
 ):
     for name, child in module.named_children():
         if isinstance(child, nn.Linear):
             if _check_linear_int4_k(child.in_features, groupsize) or padding_allowed:
-                setattr(
-                    module,
-                    name,
-                    linear_class(
-                        child.in_features,
-                        child.out_features,
-                        bias=False,
-                        groupsize=groupsize,
-                        precision=precision,
-                        scales_precision=scales_precision,
-                    ),
+                new_linear = linear_class(
+                    child.in_features,
+                    child.out_features,
+                    bias=False,
+                    device=child.weight.device,
+                    groupsize=groupsize,
+                    precision=precision,
+                    scales_precision=scales_precision,
                 )
+                # In distributed training, the model may be instantiated
+                # on the meta device, in which case there is no need to
+                # copy the weights, and doing so will result in an error
+                if copy_weights and child.weight.device != torch.device("meta"):
+                    new_linear.weight = child.weight
+                setattr(module, name, new_linear)
             else:
                 _replace_linear_8da4w(
                     child,
@@ -1151,6 +1155,7 @@ def _replace_linear_8da4w(
                     precision,
                     scales_precision,
                     linear_class,
+                    copy_weights,
                 )

 def replace_linear_8da4w(
diff --git a/torchao/quantization/prototype/qat.py b/torchao/quantization/prototype/qat.py
index 4271d68e9b..7901fa8b5b 100644
---
a/torchao/quantization/prototype/qat.py +++ b/torchao/quantization/prototype/qat.py @@ -54,6 +54,7 @@ def prepare( self.precision, self.scales_precision, Int8DynActInt4WeightQATLinear, + copy_weights = True, ) return model @@ -111,6 +112,7 @@ def __init__( in_features: int, out_features: int, bias: bool = False, + device: torch.device = None, groupsize: int = 256, precision: torch.dtype = torch.float32, scales_precision: torch.dtype = torch.float32, @@ -119,7 +121,7 @@ def __init__( in_features, out_features, bias, - device=None, + device=device, dtype=precision, ) assert ( From 4852000e982e2ace00978ec918205fbae0ec29c9 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Mon, 6 May 2024 10:47:17 -0700 Subject: [PATCH 17/61] Don't import _C in fbcode (#218) Summary: since we don't have compilation command in fbcode, and we don't need to use cuda kernels right now in fbcode Test Plan: internal CI Reviewers: Subscribers: Tasks: Tags: --- test/test_ops.py | 3 ++- torchao/__init__.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index 6e84d138ad..a569f24799 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1,5 +1,5 @@ import torch -from torch.testing._internal.common_utils import TestCase +from torch.testing._internal.common_utils import TestCase, IS_FBCODE from torch.testing._internal.optests import opcheck import torchao from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 @@ -8,6 +8,7 @@ # torch.testing._internal.optests.generate_tests.OpCheckError: opcheck(op, ...): # test_faketensor failed with module 'torch' has no attribute '_custom_ops' (scroll up for stack trace) +@unittest.skipIf(IS_FBCODE, "Skipping the test in fbcode since we don't have TARGET file for kernels") class TestOps(TestCase): def _create_tensors_with_iou(self, N, iou_thresh): # force last box to have a pre-defined iou with the first box diff --git a/torchao/__init__.py b/torchao/__init__.py index d9b73e3583..340bfe3013 100644 --- a/torchao/__init__.py +++ b/torchao/__init__.py @@ -5,8 +5,10 @@ ) from . import dtypes import torch -from . import _C -from . import ops +from torch.testing._internal.common_utils import IS_FBCODE +if not IS_FBCODE: + from . import _C + from . 
import ops

 __all__ = [
     "dtypes",

From c2657e4287c7de4a7461cf8242e12de9fbc19881 Mon Sep 17 00:00:00 2001
From: jeromeku
Date: Mon, 6 May 2024 17:37:14 -0700
Subject: [PATCH 18/61] Fused DoRA kernels (#216)

* add dora kernels
---
 benchmarks/dora/bench_utils.py                    | 131 +++++
 benchmarks/dora/dora_bench.py                     | 348 +++++++++++
 test/dora/test_dora_fusion.py                     | 193 +++++++
 test/dora/test_dora_layer.py                      | 111 ++++
 torchao/prototype/common/__init__.py              |   0
 torchao/prototype/common/profiling_tools.py       | 268 +++++++++
 torchao/prototype/dora/README.md                  | 164 ++++++
 torchao/prototype/dora/__init__.py                |   0
 torchao/prototype/dora/dora_layer.py              | 194 +++++++
 torchao/prototype/dora/dora_profile.py            | 124 ++++
 torchao/prototype/dora/kernels/__init__.py        |   0
 torchao/prototype/dora/kernels/common.py          | 176 ++++++
 torchao/prototype/dora/kernels/custom_autotune.py | 395 +++++++++++++
 torchao/prototype/dora/kernels/matmul.py          | 259 +++++++++
 torchao/prototype/dora/kernels/smallk.py          | 545 ++++++++++++++++++
 15 files changed, 2908 insertions(+)
 create mode 100644 benchmarks/dora/bench_utils.py
 create mode 100644 benchmarks/dora/dora_bench.py
 create mode 100644 test/dora/test_dora_fusion.py
 create mode 100644 test/dora/test_dora_layer.py
 create mode 100644 torchao/prototype/common/__init__.py
 create mode 100644 torchao/prototype/common/profiling_tools.py
 create mode 100644 torchao/prototype/dora/README.md
 create mode 100644 torchao/prototype/dora/__init__.py
 create mode 100644 torchao/prototype/dora/dora_layer.py
 create mode 100644 torchao/prototype/dora/dora_profile.py
 create mode 100644 torchao/prototype/dora/kernels/__init__.py
 create mode 100644 torchao/prototype/dora/kernels/common.py
 create mode 100644 torchao/prototype/dora/kernels/custom_autotune.py
 create mode 100644 torchao/prototype/dora/kernels/matmul.py
 create mode 100644 torchao/prototype/dora/kernels/smallk.py

diff --git a/benchmarks/dora/bench_utils.py b/benchmarks/dora/bench_utils.py
new file mode 100644
index 0000000000..2de4fa637a
--- /dev/null
+++ b/benchmarks/dora/bench_utils.py
@@ -0,0 +1,131 @@
+import torch
+from bitsandbytes.nn import Linear4bit
+from hqq.core.quantize import BaseQuantizeConfig, HQQLinear
+
+from torchao.prototype.dora.dora_layer import BNBDoRALinear, HQQDoRALinear
+from torchao.prototype.dora.kernels.matmul import triton_mm
+from torchao.prototype.dora.kernels.smallk import triton_mm_small_k
+
+
+def make_lora_weights(ranks, in_features, out_features, dtype):
+    As = [torch.randn(rank, in_features, device="cuda", dtype=dtype) for rank in ranks]
+    Bs = [torch.randn(out_features, rank, device="cuda", dtype=dtype) for rank in ranks]
+    return As, Bs
+
+
+def make_dora_source_and_magnitude(in_features, out_features, dtype):
+    source = torch.randn(out_features, in_features, device="cuda", dtype=dtype)
+    magnitude = torch.randn(out_features, device="cuda", dtype=dtype)
+    return source, magnitude
+
+
+def make_inputs(batch_sizes, seqlen, in_features, dtype):
+    xs = [
+        torch.randn(bs * seqlen, in_features, device="cuda", dtype=dtype)
+        for bs in batch_sizes
+    ]
+    return xs
+
+
+def make_weights(batch_sizes, in_features, out_features, dtype):
+    weights = [
+        torch.randn(in_features, out_features, device="cuda", dtype=dtype)
+        for _ in range(len(batch_sizes))
+    ]
+    return weights
+
+
+def make_epilogue_sources(batch_sizes, seqlen, out_features, dtype):
+    epilogue_sources = [
+        torch.randn(bs * seqlen, out_features, device="cuda", dtype=dtype)
+        for bs in batch_sizes
+    ]
+    return epilogue_sources
+
+
+def make_epilogue_scales(batch_sizes, out_features, dtype):
+
epilogue_scales = [ + torch.randn(out_features, device="cuda", dtype=dtype) + for _ in range(len(batch_sizes)) + ] + return epilogue_scales + + +def dora_colnorm_ref( + A: torch.Tensor, + B: torch.Tensor, + base_weight: torch.Tensor, + magnitude_vector: torch.Tensor, +): + column_norm = (base_weight + B @ A).norm(p=2, dim=1) + return magnitude_vector / column_norm + + +def dora_mm_epilogue_ref( + A: torch.Tensor, + B: torch.Tensor, + epilogue_source: torch.Tensor, + epilogue_scale: torch.Tensor, +): + out = (A @ B + epilogue_source) * epilogue_scale[None, :] + return out + + +def dora_ref(x, w, lora_A, lora_B, magnitude_vector): + # (bs x seq_len x out_features) = (bs x seq_len x in_features) @ (in_features x rank) @ (rank x out_features) + lora_out = (x @ lora_A.T) @ lora_B.T + # (out_features) + magnitude_scale = dora_colnorm_ref(lora_A, lora_B, w, magnitude_vector) + # (bs x seq_len x out_features) + dora_out_ref = dora_mm_epilogue_ref(x, w, lora_out, magnitude_scale) + return dora_out_ref + + +def dora_triton(x, w, lora_A, lora_B, magnitude_vector): + lora_out = (x @ lora_A.T) @ lora_B.T + magnitude_scale = triton_mm_small_k( + lora_B, + lora_A, + epilogue_norm=True, + source=w, + magnitude=magnitude_vector, + store_acc=False, + ) + dora_out = triton_mm(x, w, epilogue_source=lora_out, epilogue_scale=magnitude_scale) + return dora_out + + +def setup_dora_base_layers(layer_type, in_features, out_features, dtype): + if "bnb" in layer_type: + # BitsandBytes + base_layer = Linear4bit( + input_features=in_features, + output_features=out_features, + bias=False, + quant_type="nf4", + compute_dtype=dtype, + ).cuda() + base_layer.quant_state.dtype = base_layer.compute_dtype + dora_cls = BNBDoRALinear + elif "hqq" in layer_type: + # HQQ + quant_config = BaseQuantizeConfig( + nbits=4, + group_size=64, + quant_zero=False, + quant_scale=False, + offload_meta=True, + view_as_float=True, + ) + linear = torch.nn.Linear( + in_features, out_features, dtype=dtype, bias=False + ).cuda() + base_layer = HQQLinear( + linear, + quant_config, + compute_dtype=dtype, + ) + dora_cls = HQQDoRALinear + else: + raise ValueError(f"Unknown layer type: {layer_type}") + return base_layer, dora_cls diff --git a/benchmarks/dora/dora_bench.py b/benchmarks/dora/dora_bench.py new file mode 100644 index 0000000000..305cfbdb15 --- /dev/null +++ b/benchmarks/dora/dora_bench.py @@ -0,0 +1,348 @@ +import argparse + +import pandas as pd +import torch +from bench_utils import ( + dora_colnorm_ref, + dora_mm_epilogue_ref, + dora_ref, + dora_triton, + make_dora_source_and_magnitude, + make_epilogue_scales, + make_epilogue_sources, + make_inputs, + make_lora_weights, + make_weights, + setup_dora_base_layers, +) +from triton.testing import do_bench + +from torchao.prototype.dora.kernels.matmul import triton_mm +from torchao.prototype.dora.kernels.smallk import triton_mm_small_k +from torchao.prototype.common.profiling_tools import pivot_df + + +def run_colnorm_bench(args): + in_features, out_features = args.in_features, args.out_features + + dtype = getattr(torch, args.dtype) + + # Inputs + As, Bs = make_lora_weights(args.dora_ranks, in_features, out_features, dtype) + source, magnitude = make_dora_source_and_magnitude(in_features, out_features, dtype) + + # torch.compile + dora_colnorm_compiled = torch.compile(dora_colnorm_ref, mode=args.compile_mode) + compiled_key = f"compiled_{args.compile_mode}" + + # Benchmark + timings = [] + + for a, b in zip(As, Bs): + ref_t = do_bench(lambda: dora_colnorm_ref(a, b, source, magnitude)) + 
compiled_t = do_bench(lambda: dora_colnorm_compiled(a, b, source, magnitude)) + + test_t = do_bench( + lambda: triton_mm_small_k( + b, + a, + epilogue_norm=True, + source=source, + magnitude=magnitude, + store_acc=False, + ), + ) + common_args = [a.shape[0], a.shape[1], b.shape[0], args.dtype] + timings.append([*common_args, "ref", ref_t]) + timings.append([*common_args, compiled_key, compiled_t]) + timings.append([*common_args, "triton", test_t]) + + # Group results for kernel type + headers = ["rank", "in_features", "out_features", "dtype", "kernel", "time(ms)"] + df = pd.DataFrame(timings, columns=headers) + id_cols = ["rank", "in_features", "out_features"] + pivot_df( + df, + id_cols=id_cols, + columns="kernel", + values="time(ms)", + column_order=[*id_cols, "ref", compiled_key, "triton"], + show=True, + ) + + +def run_epilogue_bench(args): + in_features, out_features = args.in_features, args.out_features + seqlen = args.seqlen + batch_sizes = ( + args.batch_sizes if isinstance(args.batch_sizes, list) else [args.batch_sizes] + ) + dtype = getattr(torch, args.dtype) + + # Inputs + xs = make_inputs(batch_sizes, seqlen, in_features, dtype) + weights = make_weights(batch_sizes, in_features, out_features, dtype) + epilogue_sources = make_epilogue_sources(batch_sizes, seqlen, out_features, dtype) + epilogue_scales = make_epilogue_scales(batch_sizes, out_features, dtype) + + # torch.compile + dora_mm_epilogue_compiled = torch.compile( + dora_mm_epilogue_ref, mode=args.compile_mode + ) + compiled_key = f"compiled_{args.compile_mode}" + + # Benchmark + timings = [] + for bs, x, w, e1, e2 in zip( + batch_sizes, xs, weights, epilogue_sources, epilogue_scales + ): + ref_t = do_bench(lambda: dora_mm_epilogue_ref(x, w, e1, e2)) + compiled_t = do_bench(lambda: dora_mm_epilogue_compiled(x, w, e1, e2)) + + test_t = do_bench( + lambda: triton_mm( + x, + w, + epilogue_source=e1, + epilogue_scale=e2, + ) + ) + common_args = [bs, seqlen, w.shape[0], w.shape[1], args.dtype] + timings.append([*common_args, "ref", ref_t]) + timings.append([*common_args, compiled_key, compiled_t]) + timings.append([*common_args, "triton", test_t]) + + # Group results for kernel type + headers = [ + "bs", + "seqlen", + "in_features", + "out_features", + "dtype", + "kernel", + "time(ms)", + ] + df = pd.DataFrame(timings, columns=headers) + id_cols = ["bs", "seqlen", "in_features", "out_features", "dtype"] + + pivot_df( + df, + id_cols=id_cols, + columns="kernel", + values="time(ms)", + column_order=[*id_cols, "ref", compiled_key, "triton"], + show=True, + ) + + +def run_full_dora(args): + """Dora Layer + + out = (x @ base_weight + lora_out) * magnitude_scale + where: + `lora_out = lora_B(lora_A(x)` + `magnitude_scale = (base_weight + lora_B @ lora_A).norm(p=2, dim=1) * magnitude_vector` + """ + + dtype = getattr(torch, args.dtype) + xs = make_inputs(args.batch_sizes, args.seqlen, args.in_features, dtype) + weights = make_weights(args.batch_sizes, args.in_features, args.out_features, dtype) + lora_As, lora_Bs = make_lora_weights( + args.dora_ranks, args.in_features, args.out_features, dtype + ) + _, magnitude_vector = make_dora_source_and_magnitude( + args.in_features, args.out_features, dtype + ) + + # torch.compile + dora_compiled = torch.compile(dora_ref, mode=args.compile_mode) + # triton_compiled = torch.compile(dora_triton, mode=args.compile_mode) + + compiled_key = f"compiled_{args.compile_mode}" + # triton_compiled_key = f"triton_compiled_{args.compile_mode}" + + # Benchmark + timings = [] + for lora_A, lora_B in 
zip(lora_As, lora_Bs): + for bs, x, w in zip(args.batch_sizes, xs, weights): + # ref = dora_ref(x, w, lora_A, lora_B, magnitude_vector) + # test = dora_triton(x, w, lora_A, lora_B, magnitude_vector) + # compiled = dora_compiled(x, w, lora_A, lora_B, magnitude_vector) + # test_compiled = triton_compiled(x, w, lora_A, lora_B, magnitude_vector) + # print(f"triton diff: {(ref - test).abs().max()}") + # print(f"compiled diff: {(ref - compiled).abs().max()}") + # print(f"triton compiled diff: {(ref - test_compiled).abs().max()}") + ref_t = do_bench(lambda: dora_ref(x, w, lora_A, lora_B, magnitude_vector)) + compiled_t = do_bench( + lambda: dora_compiled(x, w, lora_A, lora_B, magnitude_vector) + ) + triton_t = do_bench( + lambda: dora_triton(x, w, lora_A, lora_B, magnitude_vector) + ) + # triton_compiled_t = do_bench( + # lambda: triton_compiled(x, w, lora_A, lora_B, magnitude_vector) + # ) + + # batch_size, seq_len, rank, in_features, out_features, dtype + common_args = [ + bs, + args.seqlen, + lora_A.shape[0], + args.in_features, + args.out_features, + args.dtype, + ] + timings.append([*common_args, "ref", ref_t]) + timings.append([*common_args, compiled_key, compiled_t]) + timings.append([*common_args, "triton", triton_t]) + # timings.append([*common_args, triton_compiled_key, triton_compiled_t]) + + headers = [ + "bs", + "seqlen", + "rank", + "in_features", + "out_features", + "dtype", + "kernel", + "time(ms)", + ] + df = pd.DataFrame(timings, columns=headers) + id_cols = ["bs", "seqlen", "rank", "in_features", "out_features", "dtype"] + + pivot_df( + df, + id_cols=id_cols, + columns="kernel", + values="time(ms)", + column_order=[ + *id_cols, + "ref", + compiled_key, + "triton", + ], # , triton_compiled_key], + show=True, + ) + + +def run_dora_layer_bench(args): + dtype = getattr(torch, args.dtype) + in_features, out_features = args.in_features, args.out_features + xs = make_inputs(args.batch_sizes, args.seqlen, args.in_features, dtype) + base_layer, dora_cls = setup_dora_base_layers( + args.kernel, in_features, out_features, dtype + ) + + timings = [] + layer_key = f"{args.kernel}" + layer_key_fused = f"{args.kernel}-fused" + + for bs, x in zip(args.batch_sizes, xs): + for rank in args.dora_ranks: + dora_layer = dora_cls(base_layer, rank).cuda() + common_args = [ + bs, + args.seqlen, + rank, + args.in_features, + args.out_features, + args.dtype, + ] + ref_t = do_bench(lambda: dora_layer.forward(x)) + fused_t = do_bench(lambda: dora_layer.forward_fused(x)) + timings.append([*common_args, layer_key, ref_t]) + timings.append([*common_args, layer_key_fused, fused_t]) + + headers = [ + "bs", + "seqlen", + "rank", + "in_features", + "out_features", + "dtype", + "layer", + "time(ms)", + ] + df = pd.DataFrame(timings, columns=headers) + id_cols = ["bs", "seqlen", "rank", "in_features", "out_features", "dtype"] + + pivot_df( + df, + id_cols=id_cols, + columns="layer", + values="time(ms)", + column_order=[ + *id_cols, + layer_key, + layer_key_fused, + ], + show=True, + ) + + +def run_bench(args): + print(f"""Running {args.kernel} benchmark with dtype={args.dtype}, batch_sizes={args.batch_sizes}, seqlen={args.seqlen}, + in_features={args.in_features}, out_features={args.out_features}, dora_ranks={args.dora_ranks}""") + if args.kernel == "dora-colnorm": + return run_colnorm_bench(args) + elif args.kernel == "dora-mm-epilogue": + return run_epilogue_bench(args) + elif args.kernel == "dora-full": + return run_full_dora(args) + elif args.kernel == "dora-bnb" or args.kernel == "dora-hqq": + return 
run_dora_layer_bench(args) + else: + raise ValueError(f"Unknown kernel: {args.kernel}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + "--kernel", + type=str, + default="dora-mm-epilogue", + choices=( + "dora-colnorm", + "dora-mm-epilogue", + "dora-full", + "dora-bnb", + "dora-hqq", + ), + help="""The kernel to benchmark + + dora-colnorm: Small K GEMM with fused column-norm and magnitude vector multiplication + dora-mm-epilogue: GEMM with fused epilogue elementwise addition and broadcasted scale + dora-full: Full DORA kernel (dora-colnorm + dora-mm-epilogue) + dora-bnb: BNBDoRALinear layer with fused kernels + dora-hqq: HQQDoRALinear layer with fused kernels + """, + ) + parser.add_argument("--seqlen", type=int, default=512) + parser.add_argument( + "--batch_sizes", type=int, nargs="+", default=[1, 2, 4, 8, 16, 32] + ) + parser.add_argument("--dora_ranks", type=int, nargs="+", default=[16, 32, 64]) + parser.add_argument("--in_features", type=int, default=4096) + parser.add_argument("--out_features", type=int, default=4096) + parser.add_argument( + "--dtype", + type=str, + default="float16", + choices=("float16", "bfloat16", "float32"), + ) + parser.add_argument( + "--compile_mode", + type=str, + default="default", + choices=( + "default", + "reduce-overhead", + "max-autotune-no-cudagraphs", + "max-autotune", + ), + ) + + args = parser.parse_args() + run_bench(args) diff --git a/test/dora/test_dora_fusion.py b/test/dora/test_dora_fusion.py new file mode 100644 index 0000000000..a7959f85af --- /dev/null +++ b/test/dora/test_dora_fusion.py @@ -0,0 +1,193 @@ +import sys + +import pytest + +if sys.version_info < (3, 11): + pytest.skip("requires Python >= 3.11", allow_module_level=True) + +triton = pytest.importorskip("triton", reason="requires triton") + +import itertools + +import torch + +from torchao.prototype.dora.kernels.matmul import triton_mm +from torchao.prototype.dora.kernels.smallk import triton_mm_small_k + +torch.manual_seed(0) + +# Test configs +M = 4096 +N = 4096 +Ks = [int(2**i) for i in range(4, 7)] + +FUSED_DORA_SHAPES = [(M, N, K) for K in Ks[:1]] + +DTYPES = [torch.float32, torch.float16, torch.bfloat16] + +STORE_ACC = [False] +EPILOGUE_NORM = [True, False] +ADD_SOURCE = [True] +MAGNITUDE_VECTOR = [True] +FUSED_DORA_TEST_CONFIGS = list( + itertools.product( + FUSED_DORA_SHAPES, + STORE_ACC, + EPILOGUE_NORM, + ADD_SOURCE, + MAGNITUDE_VECTOR, + DTYPES, + ) +) + + +def _arg_to_id(arg): + if isinstance(arg, (tuple, list)): + return "x".join([str(x) for x in arg]) + return str(arg) + + +def check(expected, actual, dtype): + if dtype == torch.float32: + atol = 1e-4 + elif dtype == torch.float16: + atol = 1e-3 + elif dtype == torch.bfloat16: + atol = 1e-2 + else: + raise ValueError(f"Unsupported dtype: {dtype}") + diff = (expected - actual).abs().max() + print(f"diff: {diff}") + # assert diff < atol + return diff + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU") +@pytest.mark.parametrize( + "shape, store_acc, epilogue_norm, add_source, magnitude_vector, dtype", + FUSED_DORA_TEST_CONFIGS, + ids=_arg_to_id, +) +def test_dora_column_norm( + shape, store_acc, epilogue_norm, add_source, magnitude_vector, dtype +): + if not (store_acc or epilogue_norm): + pytest.skip("Either store_acc or epilogue_norm must be True") + + M, N, K = shape + A = torch.randn(M, K, device="cuda", dtype=dtype) + B = torch.randn(K, N, device="cuda", dtype=dtype) + source = 
torch.randn(M, N, device="cuda", dtype=dtype) + magnitude = torch.randn(M, device="cuda", dtype=dtype) + + c_ref = torch.matmul(A, B) + norm2_ref = 1 / c_ref.norm(2, dim=1) + source_ref = source + c_ref + source_norm2_ref = 1 / (source + c_ref).norm(2, dim=1) + source_norm2_magnitude_ref = magnitude * source_norm2_ref + + # First test small K only kernel, no epilogue + # source = None # source # None + # magnitude = None # magnitude # None + + tt_out = triton_mm_small_k( + A, + B, + source=source if add_source else None, + magnitude=magnitude if magnitude_vector else None, + epilogue_norm=epilogue_norm, + store_acc=store_acc, + ) + + if store_acc: + c_test = tt_out[0] if epilogue_norm else tt_out + if add_source: + check(source_ref, c_test, dtype) + else: + check(c_ref, c_test, dtype) + + if epilogue_norm: + norm2_test = tt_out[1] if store_acc else tt_out + if add_source: + if magnitude_vector: + check(source_norm2_magnitude_ref, norm2_test, dtype) + else: + check(source_norm2_ref, norm2_test, dtype) + else: + check(norm2_ref, norm2_test, dtype) + + +BATCH_SIZES = [int(2**i) for i in range(6)] +SEQ_LENS = [512] +IN_FEATURES = [4096] +OUT_FEATURES = [4096] +FUSED_MATMUL_SHAPES = [ + (bs * seqlen, in_features, out_features) + for bs, seqlen, in_features, out_features in zip( + BATCH_SIZES, SEQ_LENS, IN_FEATURES, OUT_FEATURES + ) +] +EPILOGUE_ELEMENTWISE_ADD = [True] +EPILOGUE_BROADCAST_SCALE = [True] + +FUSED_MATMUL_TEST_CONFIGS = list( + itertools.product( + FUSED_MATMUL_SHAPES[:1], + DTYPES, + EPILOGUE_ELEMENTWISE_ADD, + EPILOGUE_BROADCAST_SCALE, + ) +) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU") +@pytest.mark.parametrize( + "shape, dtype, epilogue_add, epilogue_scale", + FUSED_MATMUL_TEST_CONFIGS, + ids=_arg_to_id, +) +def test_dora_matmul(shape, dtype, epilogue_add, epilogue_scale): + M, K, N = shape + A = torch.randn(M, K, device="cuda", dtype=dtype) + B = torch.randn(K, N, device="cuda", dtype=dtype) + C = torch.randn(M, N, device="cuda", dtype=dtype) if epilogue_add else None + scale = torch.randn(N, device="cuda", dtype=dtype) if epilogue_scale else None + + D_ref = torch.matmul(A, B) + if epilogue_add: + D_ref += C + if epilogue_scale: + D_ref *= scale.unsqueeze(0) + + D_test = triton_mm(A, B, epilogue_source=C, epilogue_scale=scale) + check(D_ref, D_test, dtype) + + +MODES = ["default"] + + +@pytest.mark.skip("TODO: torch.compile does not work with custom kernel") +@pytest.mark.parametrize( + "shape, dtype, epilogue_add, epilogue_scale, mode", + [[*cfg, mode] for cfg in FUSED_MATMUL_TEST_CONFIGS for mode in MODES][:1], + ids=_arg_to_id, +) +def test_dora_matmul_compile(shape, dtype, epilogue_add, epilogue_scale, mode): + M, K, N = shape + A = torch.randn(M, K, device="cuda", dtype=dtype) + B = torch.randn(K, N, device="cuda", dtype=dtype) + C = torch.randn(M, N, device="cuda", dtype=dtype) if epilogue_add else None + scale = torch.randn(N, device="cuda", dtype=dtype) if epilogue_scale else None + + D_ref = torch.matmul(A, B) + if epilogue_add: + D_ref += C + if epilogue_scale: + D_ref *= scale.unsqueeze(0) + + D_test = triton_mm(A, B, epilogue_source=C, epilogue_scale=scale) + check(D_ref, D_test, dtype) + + triton_compiled = torch.compile(triton_mm, mode=mode) + D_compiled = triton_compiled(A, B, epilogue_source=C, epilogue_scale=scale) + check(D_ref, D_compiled, dtype) diff --git a/test/dora/test_dora_layer.py b/test/dora/test_dora_layer.py new file mode 100644 index 0000000000..dd38cc8d6b --- /dev/null +++ b/test/dora/test_dora_layer.py @@ -0,0 
+1,111 @@ +import sys + +import pytest + +if sys.version_info < (3, 11): + pytest.skip("requires Python >= 3.11", allow_module_level=True) + +bnbnn = pytest.importorskip("bitsandbytes.nn", reason="requires bitsandbytes") +hqq_core = pytest.importorskip("hqq.core.quantize", reason="requires hqq") + +import itertools + +import torch + +# Import modules as opposed to classes directly, otherwise pytest.importorskip always skips +Linear4bit = bnbnn.Linear4bit +BaseQuantizeConfig = hqq_core.BaseQuantizeConfig +HQQLinear = hqq_core.HQQLinear +from torchao.prototype.dora.dora_layer import BNBDoRALinear, DoRALinear, HQQDoRALinear + + +def check(expected, actual, dtype): + if dtype == torch.float32: + atol = 1e-4 + elif dtype == torch.float16: + atol = 1e-3 + elif dtype == torch.bfloat16: + atol = 1e-2 + else: + raise ValueError(f"Unsupported dtype: {dtype}") + diff = (expected - actual).abs().max() + print(f"diff: {diff}") + # assert diff < atol + return diff + + +def _arg_to_id(arg): + if isinstance(arg, (tuple, list)): + return "x".join([str(x) for x in arg]) + return str(arg) + + +BATCH_SIZES = [1] +SEQ_LENS = [512] +DTYPES = [torch.float32, torch.float16, torch.bfloat16] +IN_FEATURES = [4096] +OUT_FEATURES = [4096, 11008] +LORA_RANKS = [16] +MODEL_TYPES = ["DoRALinear", "BNBDoRALinear", "HQQDoRALinear"] + +TEST_CONFIGS = list( + itertools.product( + BATCH_SIZES, + SEQ_LENS, + IN_FEATURES, + OUT_FEATURES, + LORA_RANKS, + DTYPES, + MODEL_TYPES, + ) +) + + +@pytest.mark.parametrize( + "bs, seqlen, in_features, out_features, lora_rank, dtype, model_type", + TEST_CONFIGS, + ids=_arg_to_id, +) +def test_dora_layer( + bs, seqlen, in_features, out_features, lora_rank, dtype, model_type +): + x = torch.randn(bs, seqlen, in_features, dtype=dtype).cuda() + + if model_type == "DoRALinear": + base_layer = torch.nn.Linear( + in_features, out_features, dtype=dtype, bias=False + ).cuda() + dora_cls = DoRALinear + + elif model_type == "BNBDoRALinear": + base_layer = Linear4bit( + input_features=in_features, + output_features=out_features, + bias=False, + quant_type="nf4", + compute_dtype=dtype, + ).cuda() + base_layer.quant_state.dtype = base_layer.compute_dtype + dora_cls = BNBDoRALinear + + elif model_type == "HQQDoRALinear": + quant_config = BaseQuantizeConfig( + nbits=4, + group_size=64, + quant_zero=False, + quant_scale=False, + offload_meta=True, + view_as_float=True, + ) + torch_base = torch.nn.Linear(in_features, out_features, dtype=dtype, bias=False) + base_layer = HQQLinear( + torch_base, + quant_config, + compute_dtype=dtype, + ) + dora_cls = HQQDoRALinear + dora_layer = dora_cls(base_layer, lora_rank).cuda() + + ref = dora_layer.forward(x) + test = dora_layer.forward_fused(x) + check(ref, test, dtype) diff --git a/torchao/prototype/common/__init__.py b/torchao/prototype/common/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/torchao/prototype/common/profiling_tools.py b/torchao/prototype/common/profiling_tools.py new file mode 100644 index 0000000000..607895d4e4 --- /dev/null +++ b/torchao/prototype/common/profiling_tools.py @@ -0,0 +1,268 @@ +import os +import types +from datetime import datetime +from functools import partial + +import pandas as pd +import torch +import torch.autograd.profiler_util +from tabulate import tabulate +from torch.autograd.profiler import record_function +from torch.cuda.nvtx import range as nvtx_range +from triton.testing import do_bench + +# from torch.cuda.nvtx import range_pop, range_push + +TIME_FORMAT_STR: str = "%m_%d" +PROFILE_DIR = 
"./profiles" + + +def simple_bench(fn, *args, **kwargs): + t = do_bench(lambda: fn(*args, **kwargs)) + return t + + +def check(expected, actual, atol=1e-3): + diff = (expected - actual).abs().max() + print(f"diff: {diff}") + # assert diff < atol + + +def benchmark_mm( + test_fn, xs, weight, ref_fn=torch.matmul, headers=["M", "K", "N", "test", "ref"] +): + timings = [] + for x in xs: + M, K = x.shape + _, N = weight.shape + assert x.shape[1] == weight.shape[0] + print(f"Benchmarking {(M, K, N)}") + test_times = do_bench(lambda: test_fn(x, weight)) + ref_times = do_bench(lambda: ref_fn(x, weight)) + timings.append([M, K, N, test_times, ref_times]) + return pd.DataFrame(timings, columns=headers) + + +def run_bench(xs, weight): + df = benchmark_mm(xs, weight) + print(tabulate(df, headers="keys", floatfmt=".4f")) + return df + + +class CudaProfilerCtx: + def __enter__(self): + print("Starting cuda profiler") + torch.cuda.cudart().cudaProfilerStart() + return self + + def __exit__(self, exc_type, exc_value, exc_traceback) -> None: + print("Stopping cuda profiler") + torch.cuda.cudart().cudaProfilerStop() + if exc_type is not None: + print(f"Exception occurred: {exc_type}, {exc_value}") + # Return True to suppress the exception + return True + + def step(self): + pass + + +def trace_handler( + prof: torch.profiler.profile, + group_by_stack: int = 5, + group_by_input_shapes: bool = False, + prefix="", + out_dir=None, + export_events=False, + export_trace=True, + export_memory_timeline=False, +): + # Prefix for file names. + out_dir = out_dir or PROFILE_DIR + timestamp = datetime.now().strftime(TIME_FORMAT_STR) + file_prefix = os.path.join(out_dir, f"{prefix}-{timestamp}") + + if export_events: + evt_list = prof.key_averages( + group_by_stack_n=group_by_stack, group_by_input_shape=group_by_input_shapes + ) + torch.save(evt_list, f"{file_prefix}-key_averages.pt") + + # Construct the trace file. + if export_trace: + prof.export_chrome_trace(f"{file_prefix}-chrome-trace.json") + + # Construct the memory timeline file. 
+    if export_memory_timeline:
+        prof.export_memory_timeline(
+            f"{file_prefix}-memory-timeline.html", device="cuda:0"
+        )
+        prof.export_memory_timeline(
+            f"{file_prefix}-memory-timeline.json", device="cuda:0"
+        )
+
+
+# print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=10))
+
+
+def get_torch_profiler(
+    name,
+    with_stack=True,
+    with_flops=True,
+    with_modules=True,
+    record_shapes=False,
+    export_events=False,
+    export_trace=True,
+    export_memory_timeline=False,
+    out_dir=None,
+    warmup=1,
+    active=5,
+):
+    out_dir = out_dir or PROFILE_DIR
+    if not os.path.exists(out_dir):
+        os.makedirs(out_dir)
+    callback = partial(
+        trace_handler,
+        prefix=name,
+        out_dir=out_dir,
+        group_by_input_shapes=record_shapes,
+        group_by_stack=5 if export_events else None,
+        export_events=export_events,
+        export_trace=export_trace,
+        export_memory_timeline=export_memory_timeline,
+    )
+    return torch.profiler.profile(
+        activities=[
+            torch.profiler.ProfilerActivity.CPU,
+            torch.profiler.ProfilerActivity.CUDA,
+        ],
+        record_shapes=record_shapes,
+        with_stack=with_stack,
+        with_flops=with_flops,
+        with_modules=with_modules,
+        profile_memory=export_memory_timeline,
+        schedule=torch.profiler.schedule(wait=0, warmup=warmup, active=active),
+        on_trace_ready=callback,
+    )
+
+
+class TorchProfilerCtx:
+    @staticmethod
+    def profiler(
+        name,
+        out_dir,
+        warmup=1,
+        active=5,
+        record_shapes=False,
+        with_stack=True,
+        export_events=False,
+        export_trace=True,
+        export_memory_timeline=False,
+    ):
+        return get_torch_profiler(
+            name,
+            with_stack=with_stack,
+            record_shapes=export_memory_timeline or record_shapes,
+            export_events=export_events,
+            export_trace=export_trace,
+            export_memory_timeline=export_memory_timeline,
+            out_dir=out_dir,
+            warmup=warmup,
+            active=active,
+        )
+
+
+def get_annotation_ctx(profiler_type):
+    assert profiler_type in ["nsys", "torch"]
+    if profiler_type == "nsys":
+        return nvtx_range
+    else:
+        return record_function
+
+
+_PERF_COLUMNS = [
+    "key",
+    "count",
+    "cpu_children",
+    "cpu_parent",
+    "self_device_time_total",
+    "cuda_time",
+    "flops",
+    "self_cpu_time",
+    "self_cpu_time_total",
+    "cpu_time",
+    "cpu_time_total",
+    "self_device_memory_usage",
+    "device_memory_usage",
+    "self_cpu_memory_usage",
+    "cpu_memory_usage",
+]
+PERF_COLS_SELECT = [
+    "key",
+    "cpu_parent",
+    "cpu_children",
+    # "self_cpu_time",
+    # "self_cpu_time_total",
+    "cpu_time",
+    "cpu_time_total",
+    "cuda_time",
+    "self_device_time_total",
+]
+
+
+# cuda_time, cpu_time are avg times -- they correspond to CUDA time avg and CPU time avg in table() above
+# "self" times are not meaningful for annotated regions, since they only have child regions
+def is_function(obj):
+    return isinstance(obj, types.FunctionType)
+
+
+def is_method(obj):
+    return isinstance(obj, types.MethodType)
+
+
+def is_private(prop):
+    return prop.startswith("_")
+
+
+def should_exclude(obj, prop):
+    return (
+        is_function(getattr(obj, prop))
+        or is_method(getattr(obj, prop))
+        or is_private(prop)
+    )
+
+
+def _get_event_props(event: torch.autograd.profiler_util.FunctionEvent):
+    props = [p for p in dir(event) if not should_exclude(event, p)]
+    return props
+
+
+def get_events_df(events: torch.autograd.profiler_util.EventList):
+    event_props = _get_event_props(events[0])
+    data = [{p: getattr(e, p) for p in event_props} for e in events]
+    return pd.DataFrame(data)
+
+
+def get_perf_df(events: torch.autograd.profiler_util.EventList, sort=True):
+    df = get_events_df(events).filter(PERF_COLS_SELECT)
+    if sort:
+        df = df.sort_values(["cpu_time", "cuda_time"],
+    return df
+
+
+def pivot_df(
+    df,
+    id_cols: str | list[str],
+    columns: str | list[str],
+    values: str | list[str],
+    column_order: list[str] | None = None,
+    show: bool = True,
+):
+    df = df.pivot_table(
+        index=id_cols,
+        columns=columns,
+        values=values,
+    ).reset_index()
+    if column_order is not None:
+        df = df[column_order]
+    if show:
+        print(df.to_string(index=False))
+    return df
diff --git a/torchao/prototype/dora/README.md b/torchao/prototype/dora/README.md
new file mode 100644
index 0000000000..d5bebc68a8
--- /dev/null
+++ b/torchao/prototype/dora/README.md
@@ -0,0 +1,164 @@
+## Fused DoRA Kernels
+
+Fused DoRA layer implementation that reduces the number of individual kernels from ~10 to 5.
+
+## Contents
+
+- [Background](#background)
+- [Optimization](#optimization)
+- [Key Contributions](#key-contributions)
+- [Usage](#usage)
+- [Tests](#tests)
+- [Benchmarks](#benchmarks)
+- [Profiling](#profiling)
+
+## Background
+
+[DoRA](https://arxiv.org/abs/2402.09353) (weight-decomposed low-rank adaptation) is a variant of LoRA that decomposes the weight update into magnitude and direction components.
+
+The DoRA layer is roughly as follows:
+
+```python
+    dora_out = (x @ base_weight.T + lora_out) * magnitude_scale
+```
+
+where:
+
+```python
+    lora_out = lora_B(lora_A(x))
+    magnitude_scale = magnitude_vector / (base_weight + lora_B.weight @ lora_A.weight).norm(p=2, dim=1)
+```
+
+- `lora_A` and `lora_B` are `linear` layers with weight shapes `rank x in_features` and `out_features x rank`.
+- `base_weight` is the weight of the frozen `linear` layer of shape `out_features x in_features`.
+- `magnitude_vector` is initialized as the columnwise `2-norm` of the frozen weight (shape `out_features`).
+- `x` is the input of shape `batch_size x seqlen x in_features`.
+
+## Optimization
+
+After initial profiling, and as outlined above, the `DoRA` update layer requires multiple kernels.
+
+In order of compute intensity:
+
+- 4 GEMMs:
+  - `x @ base_weight`
+  - `lora_B(lora_A(x))` (2 GEMMs)
+  - `lora_B.weight @ lora_A.weight`
+- 1 Reduction: `2-norm`
+- 4 Elementwise: matrix-matrix additions (2) and broadcasted matrix-vector multiplications (2).
+
+While `torch.compile` (and `CUDA` graphs) can partially mitigate the overhead of multiple small kernels and improve the compute efficiency of individual kernels, there remains room for additional optimization: reordering the computations to facilitate fusions and, more importantly, exploiting the unique shapes of the GEMMs, thereby decreasing the number of kernel launches and increasing the compute intensity of each kernel.
+
+## Key Contributions
+
+**1 - Small K Fused Kernel**
+
+Note that `lora_B.weight @ lora_A.weight` has a specific shape, where `K << {M, N}`. That is, `lora_B.weight` is `out_features x lora_rank` and `lora_A.weight` is `lora_rank x in_features`.
+
+Since `lora_rank` is typically `< 64` while `{in,out}_features` are typically `> 4096` (e.g., `Llama MLP / QKV projections`), this `GEMM` is inefficient: each `CTA` loads a block only to perform a few `MAC` iterations, given small `K`.
+
+Moreover, note that the full result of this `GEMM` is not needed -- only its row-wise `2-norm`.
+
+Combining these two observations, we can write a fused kernel where (see the sketch at the end of this README):
+
+1. Each `CTA` computes an _entire_ row of the output matrix, with the key assumption that `BLOCK_K = K`. That is, each `CTA` does a single MAC iteration to compute a `BLOCK_M x BLOCK_N` output, then iterates across dimension `N`.
+2. Since each block processes an entire row, we can additionally fuse a grid-wise reduction along `axis=1` into the kernel, directly folding the `2-norm` computation into the `GEMM`.
+3. As an added bonus, we can also fold the `base_weight` elementwise addition and `magnitude_vector` multiplication into the `GEMM` epilogue.
+
+Altogether, this allows us to fuse the following computation into a single kernel:
+
+```python
+    magnitude_scale = magnitude_vector / (base_weight + lora_B.weight @ lora_A.weight).norm(p=2, dim=1)
+```
+
+**2 - Fused Epilogue GEMM**
+
+Additionally, instead of computing the base layer output before the `DoRA / LoRA` updates, we can compute the latter (the `LoRA` output and `magnitude_scale`) first, and fold these into the epilogue of the base layer `GEMM`:
+
+```python
+
+    # DoRA / LoRA updates
+    lora_out = lora_B(lora_A(x))
+    magnitude_scale = magnitude_vector / (base_weight + lora_B.weight @ lora_A.weight).norm(p=2, dim=1)
+
+    # This is now a single kernel
+    final_out = (x @ base_weight.T + lora_out) * magnitude_scale
+```
+
+## Usage
+
+The fused kernels can be used to implement `DoRA` / `QDoRA` layers.
+
+A reference implementation is provided in `torchao.prototype.dora.dora_layer.DoRALinear`, which defines a base `QDoRA` linear layer (with a stub `dequantize` method) along with corresponding `BNBDoRALinear` and `HQQDoRALinear` subclasses, which override `dequantize` with their respective methods.
+
+_Example_
+
+```python
+    import torch
+    from bitsandbytes.nn import Linear4bit
+    from torchao.prototype.dora.dora_layer import BNBDoRALinear
+
+    bs, seqlen = 1, 512
+    dtype = torch.float16
+    in_features, out_features, lora_rank = 4096, 4096, 16
+    x = torch.randn(bs, seqlen, in_features, dtype=dtype, device="cuda")
+
+    # Construct a bitsandbytes QDoRA layer
+    base_layer = Linear4bit(
+        input_features=in_features,
+        output_features=out_features,
+        bias=False,
+        quant_type="nf4",
+        compute_dtype=dtype,
+    ).cuda()
+    base_layer.quant_state.dtype = base_layer.compute_dtype
+    dora_layer = BNBDoRALinear(base_layer, lora_rank)
+
+    # Run reference forward pass
+    ref = dora_layer.forward(x)
+
+    # Run fused forward pass
+    fused_out = dora_layer.forward_fused(x)
+```
+
+See `test/dora/test_dora_layer.py` and `benchmarks/dora_bench.py` for more detailed usage.
+
+## Tests
+
+See `test/dora/test*` for correctness checks of the fused kernels and layers.
+
+## Benchmarks
+
+See `benchmarks/dora_bench.py`.
+
+```
+python benchmarks/dora_bench.py --help
+```
+
+Run with the flag `--kernel` set to one of `{dora-colnorm,dora-mm-epilogue}` to benchmark the respective fused kernels against a reference `torch` / `torch.compile` implementation, or `--kernel=dora-full` to bench against the entire `DoRA` computation.
+
+Additionally, passing `--kernel=dora-bnb` or `--kernel=dora-hqq` benches a reference `QDoRA` layer against its fused implementation.
+
+## Profiling
+
+The reference `DoRALinear` layer described above also has an instrumented forward pass with annotated regions for each of the `DoRA` ops.
+
+An example script for running a profiled forward pass is provided in `dora/dora_profile.py`.
+
+To run with `torch.profiler`:
+
+```
+python dora_profile.py
+```
+
+which outputs a chrome trace to the default folder `dora_profiles`.
+
+To run with `nsys`:
+
+```
+nsys profile --capture-range=cudaProfilerApi ... python dora_profile.py --profiler=nsys
+```
+
+where `...` are other desired `nsys` options.
+
+Note that `--capture-range=cudaProfilerApi` is required.
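+
+For intuition, the following is a minimal pure-`torch` sketch of what the two fused kernels described above compute. The function and argument names here are illustrative, not the actual kernel API:
+
+```python
+import torch
+
+def dora_reference(x, base_weight, lora_A_weight, lora_B_weight, magnitude_vector):
+    # Fused kernel #1: small-K GEMM with fused row-wise 2-norm / magnitude epilogue
+    column_norm = (base_weight + lora_B_weight @ lora_A_weight).norm(p=2, dim=1)
+    magnitude_scale = magnitude_vector / column_norm
+
+    # Fused kernel #2: base GEMM with fused elementwise-add / broadcast-scale epilogue
+    lora_out = (x @ lora_A_weight.T) @ lora_B_weight.T
+    return (x @ base_weight.T + lora_out) * magnitude_scale
+```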
diff --git a/torchao/prototype/dora/__init__.py b/torchao/prototype/dora/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/torchao/prototype/dora/dora_layer.py b/torchao/prototype/dora/dora_layer.py
new file mode 100644
index 0000000000..e0c97cdcb3
--- /dev/null
+++ b/torchao/prototype/dora/dora_layer.py
@@ -0,0 +1,194 @@
+import logging
+
+import bitsandbytes as bnb
+import torch
+import torch.nn as nn
+from bitsandbytes.nn import Linear4bit
+from hqq.core.quantize import BaseQuantizeConfig, HQQBackend, HQQLinear
+
+from torchao.prototype.dora.kernels.matmul import triton_mm
+from torchao.prototype.dora.kernels.smallk import triton_mm_small_k
+
+logger = logging.getLogger(__name__)
+
+
+# Adapted from https://github.com/AnswerDotAI/fsdp_qlora/blob/dora/scripts/dora.py
+class DoRALayer(nn.Module):
+    """DoRA Update"""
+
+    def __init__(
+        self, in_features, out_features, lora_rank, device, dtype, *args, **kwargs
+    ):
+        super().__init__()
+
+        # LoRA layers
+        std_dev = 1 / torch.sqrt(torch.tensor(lora_rank).float())
+        lora_A_param = nn.Parameter(
+            torch.randn(lora_rank, in_features).to(device=device, dtype=dtype) * std_dev
+        )
+        self.lora_A = nn.Linear(
+            in_features, lora_rank, bias=False, device=device, dtype=dtype
+        )
+        self.lora_A.weight = lora_A_param
+
+        self.lora_B = nn.Linear(
+            lora_rank, out_features, bias=False, device=device, dtype=dtype
+        )
+        self.lora_B.weight.data.zero_()
+
+    def forward(self, x, base_weight):
+        # LoRA update, shape `bs x seq_len x in_features @ in_features x lora_rank @ lora_rank x out_features = bs x seq_len x out_features`
+        output = self.lora_B(self.lora_A(x))
+
+        # DoRA Section 4.3: the column norm receives no gradient update.
+        column_norm = (
+            (base_weight + self.lora_B.weight @ self.lora_A.weight)
+            .norm(p=2, dim=1)
+            .detach()
+        )
+
+        return output, column_norm
+
+
+class DoRALinear(nn.Module):
+    """Reference DoRA Update Layer
+
+    out = (x @ base_weight.T + lora_out) * magnitude_scale
+    where:
+    `lora_out = lora_B(lora_A(x))`
+    `magnitude_scale = magnitude_vector / (base_weight + lora_B.weight @ lora_A.weight).norm(p=2, dim=1)`
+
+    base_weight is the weight of the frozen `linear` layer of shape `out_features x in_features`.
+
+    In QDoRA, the base weight is quantized and needs an additional dequantization step.
+    In this base DoRA layer, a placeholder (no-op) `dequantize` method stub is provided, which simply
+    returns the base weight.
+
+    For `bnb` and `hqq`, the respective `dequantize` method can be substituted.
+    """
+
+    def __init__(self, base_layer, lora_rank, *args, **kwargs):
+        super().__init__()
+
+        # Get original (dequantized) weight dtype
+        dtype = getattr(
+            base_layer, "compute_dtype", next(base_layer.parameters()).dtype
+        )
+        device = next(base_layer.parameters()).device
+        self.base_layer = base_layer
+
+        # Initialize magnitude vec - TODO: this is clunky, better way to init?
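+        # Per the DoRA formulation, the magnitude is initialized as the columnwise
+        # 2-norm of the (dequantized) base weight.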
+ base_weight = self.dequantize().clone().cuda() + self.magnitude_vec = nn.Parameter(base_weight.norm(p=2, dim=1)) + + del base_weight + torch.cuda.empty_cache() + + # DoRA layer + self.dora_layer = DoRALayer( + base_layer.in_features, + base_layer.out_features, + lora_rank, + device, + dtype, + *args, + **kwargs, + ) + + def dequantize(self): + return self.base_layer.weight + + def forward(self, x, *args, **kwargs): + # Out shape is either bs, seqlen, out_features or bs * seqlen, out_features + assert x.ndim == 2 or x.ndim == 3, "Expected 2D or 3D input" + dq_base_weight = self.dequantize() + out_shape = [*x.shape[:-1], dq_base_weight.shape[0]] + # Reshape to (bs * seqlen, out_features) + x = x.reshape(-1, x.shape[-1]) + + # LoRA update + lora_A_weight = self.dora_layer.lora_A.weight + lora_B_weight = self.dora_layer.lora_B.weight + lora_out = (x @ lora_A_weight.T) @ lora_B_weight.T + + # DoRA magnitude scale + column_norm = (dq_base_weight + lora_B_weight @ lora_A_weight).norm(p=2, dim=1) + magnitude_scale = self.magnitude_vec / column_norm + + # DoRA update + dora_out = (x @ dq_base_weight.T + lora_out) * magnitude_scale[None, :] + dora_out = dora_out.reshape(*out_shape) + + return dora_out + + def forward_fused(self, x, *args, **kwargs): + """Reorders computation as well employs two fused kernels to speed up computation. + + See README.md for description of fused kernels. + """ + assert x.ndim == 2 or x.ndim == 3, "Expected 2D or 3D input" + + dq_base_weight = self.dequantize() + # Out shape is either bs, seqlen, out_features or bs * seqlen, out_features + out_shape = [*x.shape[:-1], dq_base_weight.shape[0]] + # Reshape to (bs * seqlen, out_features) + x = x.reshape(-1, x.shape[-1]) + + # LoRA update + lora_A_weight = self.dora_layer.lora_A.weight + lora_B_weight = self.dora_layer.lora_B.weight + lora_out = (x @ lora_A_weight.T) @ lora_B_weight.T + + # DoRA magnitude + # Fused kernel #1: `magnitude_scale = (base_weight + lora_B @ lora_A).norm(p=2, dim=1) * magnitude_vector` + magnitude_scale = triton_mm_small_k( + lora_B_weight, + lora_A_weight, + epilogue_norm=True, + source=dq_base_weight, + magnitude=self.magnitude_vec, + store_acc=False, + ) + # DoRA update + # Fused kernel #2: `out = (x @ base_weight + lora_out) * magnitude_scale` + dora_out = triton_mm( + x, + dq_base_weight.T, + epilogue_source=lora_out, + epilogue_scale=magnitude_scale, + ) + dora_out = dora_out.reshape(out_shape) + + return dora_out + + # For profiling + def forward_instrumented(self, x, *args, **kwargs): + annotation_ctx = kwargs.pop("annotation_ctx") + with annotation_ctx("##dora_forward"): + with annotation_ctx("##base_layer"): + result = self.base_layer(x, *args, **kwargs) + + with annotation_ctx("##dora_layer"): + dq_weight = self.dequantize() + output, column_norm = self.dora_layer(x, dq_weight) + + with annotation_ctx("##dora_rescale"): + result += output + result = result / column_norm.view(1, 1, -1) + result = result * self.magnitude_vec.view(1, 1, -1) + + return result + + +class BNBDoRALinear(DoRALinear): + def dequantize(self): + return bnb.functional.dequantize_4bit( + self.base_layer.weight.data, self.base_layer.weight.quant_state + ) + + +class HQQDoRALinear(DoRALinear): + def dequantize(self): + return self.base_layer.dequantize() + + diff --git a/torchao/prototype/dora/dora_profile.py b/torchao/prototype/dora/dora_profile.py new file mode 100644 index 0000000000..bf87769742 --- /dev/null +++ b/torchao/prototype/dora/dora_profile.py @@ -0,0 +1,124 @@ +import argparse + +import torch +from 
bitsandbytes.nn import Linear4bit +from hqq.core.quantize import BaseQuantizeConfig, HQQBackend, HQQLinear + +from torchao.prototype.common.profiling_tools import ( + CudaProfilerCtx, + TorchProfilerCtx, + get_annotation_ctx, +) +from torchao.prototype.dora.dora_layer import BNBDoRALinear, DoRALinear, HQQDoRALinear + + +def run_profile(args, dora_forward): + if args.profiler == "nsys": + profiler = CudaProfilerCtx() + else: + profiler = TorchProfilerCtx.profiler( + f"dora_layer-{args.layer_type}", + active=max(5, args.num_iterations), + warmup=0, + out_dir=args.outdir, + ) + + annotation_ctx = get_annotation_ctx(args.profiler) + + x = torch.randn( + args.bs, args.seqlen, args.in_features, dtype=getattr(torch, args.dtype) + ).cuda() + for _ in range(args.warmup): + _ = dora_forward(x, annotation_ctx=annotation_ctx) + + with profiler as prof: + for _ in range(args.num_iterations): + _ = dora_forward(x, annotation_ctx=annotation_ctx) + prof.step() + print(f"Finished profiling, saving results to {args.outdir}") + + +def run(args): + in_features, out_features = args.in_features, args.out_features + dora_rank = args.dora_rank + dtype = getattr(torch, args.dtype) + + base_layer = torch.nn.Linear( + in_features, out_features, dtype=dtype, bias=False + ).cuda() + + if args.layer_type == "torch": + dora_layer = DoRALinear(base_layer=base_layer, lora_rank=dora_rank) + elif args.layer_type == "bnb": + base_layer = Linear4bit( + input_features=in_features, + output_features=out_features, + bias=False, + quant_type="nf4", + compute_dtype=dtype, + ) + base_layer.quant_state.dtype = base_layer.compute_dtype + dora_layer = BNBDoRALinear(base_layer=base_layer, lora_rank=dora_rank) + elif args.layer_type == "hqq": + quant_config = BaseQuantizeConfig( + nbits=4, + group_size=64, + quant_zero=False, + quant_scale=False, + offload_meta=True, + view_as_float=True, + ) + + base_layer = HQQLinear( + base_layer, + quant_config, + compute_dtype=dtype, + ) + + base_layer.set_backend(HQQBackend.PYTORCH) + dora_layer = HQQDoRALinear(base_layer=base_layer, lora_rank=dora_rank) + + run_profile(args, dora_layer.forward_instrumented) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + "--profiler", + type=str, + default="torch", + choices=("nsys", "torch"), + help=""" + Which profiler to use + + Default is the torch.profiler + + If using `nsys`, run the nsys profiler as so, substituting with other desired nsys options: + `nsys profile --capture-range=cudaProfilerApi ... 
python dora_profile.py --profiler=nsys` + + Note that `--capture-range=cudaProfilerApi` is required + """, + ) + parser.add_argument( + "--layer_type", + type=str, + default="torch", + choices=("torch", "bnb", "hqq"), + ) + parser.add_argument("--in_features", type=int, default=4096) + parser.add_argument("--out_features", type=int, default=4096) + parser.add_argument("--dora_rank", type=int, default=16) + parser.add_argument("--bs", type=int, default=1) + parser.add_argument("--seqlen", type=int, default=512) + parser.add_argument( + "--dtype", + type=str, + default="float16", + choices=("float16", "bfloat16", "float32"), + ) + parser.add_argument("--num_iterations", type=int, default=10) + parser.add_argument("--warmup", type=int, default=2) + parser.add_argument("--outdir", type=str, default="./dora_profiles") + run(parser.parse_args()) diff --git a/torchao/prototype/dora/kernels/__init__.py b/torchao/prototype/dora/kernels/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/torchao/prototype/dora/kernels/common.py b/torchao/prototype/dora/kernels/common.py new file mode 100644 index 0000000000..cd0950d4c0 --- /dev/null +++ b/torchao/prototype/dora/kernels/common.py @@ -0,0 +1,176 @@ +from enum import Enum, StrEnum, unique + +import torch +import triton +import triton.language as tl + +# Re-exports +from triton.ops.matmul import ( + early_config_prune, + estimate_matmul_time, + get_configs_io_bound, + get_higher_dtype, +) +from triton.runtime import Config + + +@unique +class SwizzleType(Enum): + GROUPED = 0 + COLUMN_MAJOR = 1 + ROW_MAJOR = 2 + + +class TritonInputPrecision(StrEnum): + IEEE: str = "ieee" + TF32: str = "tf32" + TF32X3: str = "tf32x3" + + +TRITON_SUPPORTED_ACC_TYPES = { + torch.float16: (torch.float32, torch.float16), + torch.bfloat16: (torch.float32, torch.bfloat16), + torch.float32: (torch.float32,), + torch.int8: (torch.int32,), +} + +MATMUL_HEURISTICS = { + "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0, + "SPLIT_K": lambda args: 1 + if (args["A"].dtype == torch.bfloat16 or args["B"].dtype == torch.bfloat16) + else args["SPLIT_K"], # atomic add not supported for bfloat16 +} + + +def to_tl_type(ty): + return getattr(tl, str(ty).split(".")[-1]) + + +def get_compute_bound_configs(): + configs = [ + # basic configs for compute-bound matmuls + Config( + {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=5, + num_warps=2, + ), + # good for int8 + Config( + {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, + 
num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=5, + num_warps=2, + ), + ] + return configs + + +@triton.jit() +def swizzle_tile( + pid, + M, + N, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + GROUP_M: tl.constexpr, + SWIZZLE: tl.constexpr, +): + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + + if SWIZZLE == tl.constexpr(SwizzleType.GROUPED): + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = tl.minimum(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + elif SWIZZLE == tl.constexpr(SwizzleType.COLUMN_MAJOR): + pid_m = pid % grid_m + pid_n = pid // grid_m + elif SWIZZLE == tl.constexpr(SwizzleType.ROW_MAJOR): + pid_m = pid // grid_n + pid_n = pid % grid_n + else: + tl.static_assert(False, "swizzle type not supported") + + return pid_m, pid_n diff --git a/torchao/prototype/dora/kernels/custom_autotune.py b/torchao/prototype/dora/kernels/custom_autotune.py new file mode 100644 index 0000000000..f671520687 --- /dev/null +++ b/torchao/prototype/dora/kernels/custom_autotune.py @@ -0,0 +1,395 @@ +from __future__ import annotations + +import builtins +import logging +import os +import time +from typing import Dict + +import numpy as np +from triton.runtime.cache import default_cache_dir +from triton.runtime.errors import OutOfResources +from triton.runtime.jit import KernelInterface +from triton.testing import do_bench + +logger = logging.getLogger(__file__) + + +class Autotuner(KernelInterface): + def __init__( + self, + fn, + arg_names, + configs, + key, + reset_to_zero, + restore_value, + prune_configs_by: Dict = None, + warmup=25, + rep=100, + ): + """ + :param prune_configs_by: a dict of functions that are used to prune configs, fields: + 'perf_model': performance model used to predicate running time with different configs, returns running time + 'top_k': number of configs to bench + 'prune_num_stages_by'(optional): a function used to prune num_stages. It takes configs:List[Config] as its input, and returns pruned configs. 
+ """ + if not configs: + self.configs = [Config({}, num_warps=4, num_stages=2, num_ctas=1)] + else: + self.configs = configs + self.key_idx = [arg_names.index(k) for k in key] + self.cache = {} + self.arg_names = arg_names + + # Reset to zero or restore values + self.reset_idx = [] + if reset_to_zero is not None: + self.reset_idx = [arg_names.index(k) for k in reset_to_zero] + self.restore_idx = [] + if restore_value is not None: + self.restore_idx = [arg_names.index(k) for k in restore_value] + + # Hook to reset or restore for required tensors + self.pre_hook = lambda args, reset_only=False: 0 + self.post_hook = lambda args: 0 + if len(self.reset_idx) > 0 or len(self.restore_idx) > 0: + + def _pre_hook(args, reset_only=False): + for i in self.reset_idx: + args[i].zero_() + if not reset_only: + self.restore_copies = [args[i].clone() for i in self.restore_idx] + + self.pre_hook = _pre_hook + if len(self.restore_idx) > 0: + + def _post_hook(args): + for i, j in enumerate(self.restore_idx): + args[j].copy_(self.restore_copies[i]) + self.restore_copies = [] + + self.post_hook = _post_hook + + self.perf_model = None + self.configs_top_k = 1.0 + self.early_config_prune = None + if prune_configs_by: + self.perf_model = prune_configs_by.get("perf_model", self.perf_model) + self.configs_top_k = prune_configs_by.get("top_k", self.configs_top_k) + self.early_config_prune = prune_configs_by.get( + "early_config_prune", self.early_config_prune + ) + + self.fn = fn + self.num_warmups = warmup + self.num_reps = rep + # self.autotune_log_path = os.path.join(default_cache_dir(), autotune_log_file) + self.kernel_name = self._find_kernel_name() + + def _find_kernel_name(self): + try: + kernel_name = self.fn.__name__ + except AttributeError: + try: # in case JITfn is wrapped in both autotune and heuristic + kernel_name = self.fn.fn.__name__ + except: # noqa + kernel_name = self.fn.__name__ + return kernel_name + + def _get_key_combination(self, args, as_str=True, sep=" "): + key_vals = [f"{self.arg_names[i]}={args[i]}" for i in self.key_idx] + return f"{sep}".join(key_vals) if as_str else key_vals + + def _bench(self, *args, config, **meta): + # check for conflicts, i.e. meta-parameters both provided + # as kwargs and by the autotuner + conflicts = meta.keys() & config.kwargs.keys() + if conflicts: + raise ValueError( + f"Conflicting meta-parameters: {', '.join(conflicts)}." + " Make sure that you don't re-define auto-tuned symbols." 
+ ) + # augment meta-parameters with tunable ones + current = dict(meta, **config.kwargs) + full_nargs = {**self.nargs, **current} + + def kernel_call(): + if config.pre_hook: + config.pre_hook(full_nargs) + self.pre_hook(args) + self.fn.run( + *args, + num_warps=config.num_warps, + num_stages=config.num_stages, + num_ctas=config.num_ctas, + **current, + ) + self.post_hook(args) + + try: + return do_bench( + kernel_call, + warmup=self.num_warmups, + rep=self.num_reps, + quantiles=(0.5, 0.2, 0.8), + ) + except OutOfResources: + return [float("inf"), float("inf"), float("inf")] + + def run(self, *args, **kwargs): + self.nargs = dict(zip(self.arg_names, args)) + logger.debug(f"Autotune Num Configs: {len(self.configs)}") + if len(self.configs) > 1: + all_args = {**self.nargs, **kwargs} + _args = [] + for name in self.arg_names: + if name in all_args: + _args.append(all_args[name]) + key = [_args[i] for i in self.key_idx] + for arg in _args: + if hasattr(arg, "dtype"): + key.append(str(arg.dtype)) + key = tuple(key) + if key not in self.cache: + logger.debug( + f"\n==== Autotune ====\nRunning autotune for {self.kernel_name} for {len(self.configs)} total configs" + f" for key combination {self._get_key_combination(args)}..." + ) + # prune configs + pruned_configs = self.prune_configs(kwargs) + logger.debug(f"\nNum configs after pruning {len(pruned_configs)}") + bench_start = time.time() + timings = {} + for config in pruned_configs: + timings[config] = self._bench(*args, config=config, **kwargs) + # timings = { + # config: self._bench(*args, config=config, **kwargs) + # for config in pruned_configs + # } + bench_end = time.time() + self.bench_time = bench_end - bench_start + self.cache[key] = builtins.min(timings, key=timings.get) + self.pre_hook(args, reset_only=True) + self.configs_timings = timings + + sorted_timings = dict( + sorted(timings.items(), key=lambda x: np.mean(x[1])) + ) + _key_suffix = self._get_key_combination(args, sep="-") + autotune_file = f"autotune_{self.kernel_name}_{_key_suffix}.log" + autotune_log_path = os.path.join(default_cache_dir(), autotune_file) + + logger.debug(f"\nFinished autotune, writing log to {autotune_log_path}") + + with open(f"{autotune_log_path}", "w") as f: + f.write( + f" ==== Autotune Results ====\nKernel name: {self.kernel_name}\nArgs: {self.arg_names}\nKeys: {self._get_key_combination(args)}\n" + ) + f.write(f"\nPruned configs:\n") + for cfg in pruned_configs: + f.write(f"{cfg}\n") + f.write(f"Timings:\n") + for cfg, timing in sorted_timings.items(): + f.write(f"{cfg} {timing} \n") + f.write(f"Best config: {self.cache[key]}\n") + else: + logger.debug( + f"Key {key} for {self.kernel_name} already in cache, skipping autotune\n" + ) + + config = self.cache[key] + # logger.debug(f"\nAutotune: Cache hit! 
Running best config...") + else: + config = self.configs[0] + self.best_config = config + logger.debug(f"\nAutotune Best Config: {config}\n") + + full_nargs = {**self.nargs, **kwargs, **self.best_config.kwargs} + if config.pre_hook is not None: + config.pre_hook(full_nargs) + ret = self.fn.run( + *args, + num_warps=config.num_warps, + num_stages=config.num_stages, + num_ctas=config.num_ctas, + **kwargs, + **config.kwargs, + ) + self.nargs = None + return ret + + def prune_configs(self, kwargs): + pruned_configs = self.configs + if self.early_config_prune: + pruned_configs = self.early_config_prune(self.configs, self.nargs) + if self.perf_model: + top_k = self.configs_top_k + if isinstance(top_k, float) and top_k <= 1.0: + top_k = int(len(self.configs) * top_k) + if len(pruned_configs) > top_k: + est_timing = { + config: self.perf_model( + **self.nargs, + **kwargs, + **config.kwargs, + num_stages=config.num_stages, + num_warps=config.num_warps, + num_ctas=config.num_ctas, + ) + for config in pruned_configs + } + pruned_configs = sorted(est_timing.keys(), key=lambda x: est_timing[x])[ + :top_k + ] + return pruned_configs + + def warmup(self, *args, **kwargs): + self.nargs = dict(zip(self.arg_names, args)) + ret = [] + for config in self.prune_configs(kwargs): + ret.append( + self.fn.warmup( + *args, + num_warps=config.num_warps, + num_ctas=config.num_ctas, + num_stages=config.num_stages, + **kwargs, + **config.kwargs, + ) + ) + self.nargs = None + return ret + + +class Config: + """ + An object that represents a possible kernel configuration for the auto-tuner to try. + + :ivar meta: a dictionary of meta-parameters to pass to the kernel as keyword arguments. + :type meta: dict[Str, Any] + :ivar num_warps: the number of warps to use for the kernel when compiled for GPUs. For example, if + `num_warps=8`, then each kernel instance will be automatically parallelized to + cooperatively execute using `8 * 32 = 256` threads. + :type num_warps: int + :ivar num_stages: the number of stages that the compiler should use when software-pipelining loops. + Mostly useful for matrix multiplication workloads on SM80+ GPUs. + :type num_ctas: int + :ivar num_ctas: number of blocks in a block cluster. SM90+ only. + :ivar pre_hook: a function that will be called before the kernel is called. Parameters of this + function are args. + """ + + def __init__(self, kwargs, num_warps=4, num_stages=2, num_ctas=1, pre_hook=None): + self.kwargs = kwargs + self.num_warps = num_warps + self.num_ctas = num_ctas + self.num_stages = num_stages + self.pre_hook = pre_hook + + def __str__(self): + res = [] + for k, v in self.kwargs.items(): + res.append(f"{k}: {v}") + res.append(f"num_warps: {self.num_warps}") + res.append(f"num_ctas: {self.num_ctas}") + res.append(f"num_stages: {self.num_stages}") + return ", ".join(res) + + +def autotune( + configs, + key, + prune_configs_by=None, + reset_to_zero=None, + restore_value=None, + warmup=25, + rep=100, +): + """ + Decorator for auto-tuning a :code:`triton.jit`'d function. + + .. highlight:: python + .. code-block:: python + + @triton.autotune(configs=[ + triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4), + triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8), + ], + key=['x_size'] # the two above configs will be evaluated anytime + # the value of x_size changes + ) + @triton.jit + def kernel(x_ptr, x_size, **META): + BLOCK_SIZE = META['BLOCK_SIZE'] + :note: When all the configurations are evaluated, the kernel will run multiple times. 
+ This means that whatever value the kernel updates will be updated multiple times. + To avoid this undesired behavior, you can use the `reset_to_zero` argument, which + resets the value of the provided tensor to `zero` before running any configuration. + :param configs: a list of :code:`triton.Config` objects + :type configs: list[triton.Config] + :param key: a list of argument names whose change in value will trigger the evaluation of all provided configs. + :type key: list[str] + :param prune_configs_by: a dict of functions that are used to prune configs, fields: + 'perf_model': performance model used to predicate running time with different configs, returns running time + 'top_k': number of configs to bench + 'early_config_prune'(optional): a function used to do early prune (eg, num_stages). It takes configs:List[Config] as its input, and returns pruned configs. + :param reset_to_zero: a list of argument names whose value will be reset to zero before evaluating any configs. + :type reset_to_zero: list[str] + :param restore_value: a list of argument names whose value will be restored after evaluating any configs. + :type restore_value: list[str] + :param warmup: Warmup time (in ms) to pass to benchmarking, defaults to 25. + :type warmup: int + :param rep: Repetition time (in ms) to pass to benchmarking, defaults to 100. + :type rep: int + """ + + def decorator(fn): + return Autotuner( + fn, + fn.arg_names, + configs, + key, + reset_to_zero, + restore_value, + prune_configs_by, + warmup, + rep, + ) + + return decorator + + +class Heuristics(KernelInterface): + def __init__(self, fn, arg_names, values) -> None: + self.fn = fn + self.values = values + self.arg_names = arg_names + + def run(self, *args, **kwargs): + for v, heur in self.values.items(): + kwargs[v] = heur({**dict(zip(self.arg_names, args)), **kwargs}) + return self.fn.run(*args, **kwargs) + + +def heuristics(values): + """ + Decorator for specifying how the values of certain meta-parameters may be computed. + This is useful for cases where auto-tuning is prohibitevely expensive, or just not applicable. + + .. highlight:: python + .. code-block:: python + + @triton.heuristics(values={'BLOCK_SIZE': lambda args: 2 ** int(math.ceil(math.log2(args[1])))}) + @triton.jit + def kernel(x_ptr, x_size, **META): + BLOCK_SIZE = META['BLOCK_SIZE'] # smallest power-of-two >= x_size + :param values: a dictionary of meta-parameter names and functions that compute the value of the meta-parameter. + each such function takes a list of positional arguments as input. 
+ :type values: dict[str, Callable[[list[Any]], Any]] + """ + + def decorator(fn): + return Heuristics(fn, fn.arg_names, values) + + return decorator diff --git a/torchao/prototype/dora/kernels/matmul.py b/torchao/prototype/dora/kernels/matmul.py new file mode 100644 index 0000000000..66e5ef77ef --- /dev/null +++ b/torchao/prototype/dora/kernels/matmul.py @@ -0,0 +1,259 @@ +import logging + +import torch +import triton +import triton.language as tl + +from .common import ( + MATMUL_HEURISTICS, + TRITON_SUPPORTED_ACC_TYPES, + SwizzleType, + TritonInputPrecision, + early_config_prune, + estimate_matmul_time, + get_compute_bound_configs, + get_configs_io_bound, + get_higher_dtype, + swizzle_tile, + to_tl_type, +) +from .custom_autotune import autotune + +logger = logging.getLogger(__name__) + + +_AUTOTUNE_TOPK = 10 + + +@autotune( + get_compute_bound_configs() + get_configs_io_bound(), + key=["M", "N", "K"], + prune_configs_by={ + "early_config_prune": early_config_prune, + "perf_model": estimate_matmul_time, + "top_k": _AUTOTUNE_TOPK, + }, +) +@triton.heuristics( + { + "EVEN_K": MATMUL_HEURISTICS["EVEN_K"], + "SPLIT_K": MATMUL_HEURISTICS["SPLIT_K"], + } +) +@triton.jit +def _matmul_kernel( + A, + B, + C, + M, + N, + K, # + stride_am, + stride_ak, # + stride_bk, + stride_bn, # + stride_cm, + stride_cn, # + acc_dtype: tl.constexpr, # + input_precision: tl.constexpr, # + fp8_fast_accum: tl.constexpr, # + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_K: tl.constexpr, # + GROUP_M: tl.constexpr, + SPLIT_K: tl.constexpr, + EVEN_K: tl.constexpr, + AB_DTYPE: tl.constexpr, # + SWIZZLE: tl.constexpr, + EPILOGUE_ELEMENTWISE_ADD: tl.constexpr = False, + Epilogue_source=None, + EPILOGUE_BROADCAST_SCALE: tl.constexpr = False, + Epilogue_scale=None, +): + # matrix multiplication + pid = tl.program_id(0) + pid_z = tl.program_id(1) + + # Threadblock swizzle + pid_m, pid_n = swizzle_tile(pid, M, N, BLOCK_M, BLOCK_N, GROUP_M, SWIZZLE) + + # Operand offsets + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + + # Operand pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + # Allocate accumulator + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=acc_dtype) + + # MAC Loop + for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + k_remaining = K - k * (BLOCK_K * SPLIT_K) + _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty) + a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0) + b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0) + if AB_DTYPE is not None: + a = a.to(AB_DTYPE) + b = b.to(AB_DTYPE) + if fp8_fast_accum: + acc = tl.dot( + a, b, acc, out_dtype=acc_dtype, input_precision=input_precision + ) + else: + acc += tl.dot(a, b, out_dtype=acc_dtype, input_precision=input_precision) + + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + + # Convert acc to output dtype + acc = acc.to(C.dtype.element_ty) + + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + # mask = (rm < M)[:, None] & (rn < N)[None, :] + mask_m = (rm < M)[:, None] + mask_n = (rn < N)[None, :] + if 
EPILOGUE_ELEMENTWISE_ADD: + Epilogue_source = Epilogue_source + ( + rm[:, None] * stride_cm + rn[None, :] * stride_cn + ) + source = tl.load(Epilogue_source, mask=mask_m & mask_n) + acc += source + if EPILOGUE_BROADCAST_SCALE: + Epilogue_scale = Epilogue_scale + (rn[None, :]) + scale = tl.load(Epilogue_scale, mask=mask_n) + acc *= scale + + if SPLIT_K == 1: + tl.store(C, acc, mask=mask_m & mask_n) + else: + tl.atomic_add(C, acc, mask=mask_m & mask_n) + + +def triton_mm( + a, + b, + epilogue_source=None, + epilogue_scale=None, + acc_dtype=None, + input_precision=TritonInputPrecision.IEEE, + fp8_fast_accum=False, + output_dtype=None, + swizzle: SwizzleType = SwizzleType.GROUPED, + GROUP_M: int = 8, +): + """Triton GEMM implementation, `D = AB + C` + + Based on `triton.ops.matmul`, with the addition of epilogue. + + Args: + a (torch.Tensor): operand A + b (torch.Tensor): operand B + epilogue_source(optional, torch.Tensor): operand C in `D = AB + C` + epilogue_scale(optional, torch.Tensor): row-wise scale-vector of dim `N` in `D = scale * (AB + C)` + acc_dtype (torch.DType): accumulator type in MAC loop + input_precision (TritonInputPrecision): precision to use for fp32 matmul + fp8_fast_accum (bool) + output_dtype (optional, torch.DType): output type of the GEMM, defaults to higher dtype of A / B + + Returns: + torch.Tensor: `D = AB + C` + """ + device = a.device + # handle non-contiguous inputs if necessary + if a.stride(0) > 1 and a.stride(1) > 1: + a = a.contiguous() + if b.stride(0) > 1 and b.stride(1) > 1: + b = b.contiguous() + # checks constraints + assert a.shape[1] == b.shape[0], "incompatible dimensions" + M, K = a.shape + _, N = b.shape + + # common type between a and b + ab_dtype = get_higher_dtype(a.dtype, b.dtype) + + # allocates output + if output_dtype is None: + output_dtype = ab_dtype + + c = torch.empty((M, N), device=device, dtype=output_dtype) + + # Epilogue pre-conditions + # TODO Check strides? + if epilogue_source is not None: + assert epilogue_source.shape == (M, N), "incompatible dimensions" + assert epilogue_source.dtype == c.dtype, "incompatible dtype" + + if epilogue_scale is not None: + assert ( + epilogue_scale.ndim == 1 and epilogue_scale.shape[0] == N + ), "incompatible dimensions" + assert epilogue_scale.dtype == c.dtype, "incompatible dtype" + + # choose accumulator type + if acc_dtype is None: + acc_dtype = TRITON_SUPPORTED_ACC_TYPES[ab_dtype][0] + else: + assert isinstance(acc_dtype, torch.dtype), "acc_dtype must be a torch.dtype" + assert ( + acc_dtype in TRITON_SUPPORTED_ACC_TYPES[a.dtype] + ), "acc_dtype not compatible with the type of a" + assert ( + acc_dtype in TRITON_SUPPORTED_ACC_TYPES[b.dtype] + ), "acc_dtype not compatible with the type of b" + + # convert to triton types + acc_dtype = to_tl_type(acc_dtype) + ab_dtype = to_tl_type(ab_dtype) + output_dtype = to_tl_type(output_dtype) + + # Tensor cores support input with mixed float8 types. 
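+    # (Both operands must already be fp8; AB_DTYPE is then set to None so the
+    # kernel skips the explicit cast in its MAC loop, see `if AB_DTYPE is not None`.)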
+ if a.dtype in [tl.float8e4nv, tl.float8e5] and b.dtype in [ + tl.float8e4nv, + tl.float8e5, + ]: + ab_dtype = None + + grid = lambda META: ( + triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), + META["SPLIT_K"], + ) + + _matmul_kernel[grid]( + a, + b, + c, + M, + N, + K, # + a.stride(0), + a.stride(1), # + b.stride(0), + b.stride(1), # + c.stride(0), + c.stride(1), # + acc_dtype=acc_dtype, # + input_precision=input_precision, # + fp8_fast_accum=fp8_fast_accum, # + GROUP_M=GROUP_M, + AB_DTYPE=ab_dtype, + SWIZZLE=swizzle, + EPILOGUE_ELEMENTWISE_ADD=epilogue_source is not None, + Epilogue_source=epilogue_source, + EPILOGUE_BROADCAST_SCALE=epilogue_scale is not None, + Epilogue_scale=epilogue_scale, + ) + return c diff --git a/torchao/prototype/dora/kernels/smallk.py b/torchao/prototype/dora/kernels/smallk.py new file mode 100644 index 0000000000..fc24ea223f --- /dev/null +++ b/torchao/prototype/dora/kernels/smallk.py @@ -0,0 +1,545 @@ +import heapq +import logging +from enum import Enum, StrEnum, unique + +import torch +import triton +import triton.language as tl +from triton.ops.matmul import ( + estimate_matmul_time, + get_configs_io_bound, + get_higher_dtype, +) +from triton.runtime import driver + +from .custom_autotune import Config, autotune + +logger = logging.getLogger(__name__) + + +@unique +class SwizzleType(Enum): + GROUPED = 0 + COLUMN_MAJOR = 1 + ROW_MAJOR = 2 + + +class TritonInputPrecision(StrEnum): + IEEE: str = "ieee" + TF32: str = "tf32" + TF32X3: str = "tf32x3" + + +TRITON_SUPPORTED_ACC_TYPES = { + torch.float16: (torch.float32, torch.float16), + torch.bfloat16: (torch.float32, torch.bfloat16), + torch.float32: (torch.float32,), + torch.int8: (torch.int32,), +} + + +def to_tl_type(ty): + return getattr(tl, str(ty).split(".")[-1]) + + +def get_compute_bound_configs(): + configs = [ + # basic configs for compute-bound matmuls + Config( + {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, + num_stages=5, + num_warps=2, + ), + # good for int8 + Config( + {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=3, + num_warps=8, + ), + Config( + {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=4, + 
num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=4, + num_warps=4, + ), + Config( + {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, + num_stages=5, + num_warps=2, + ), + ] + return configs + + +@triton.jit() +def swizzle_tile( + pid, + M, + N, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + GROUP_M: tl.constexpr, + SWIZZLE: tl.constexpr, +): + if SWIZZLE == tl.constexpr(SwizzleType.GROUPED): + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = tl.minimum(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + else: + tl.static_assert(False, "swizzle type not supported") + + return pid_m, pid_n + + +def get_small_k_configs(): + configs = get_compute_bound_configs() + get_configs_io_bound() + KEYS_TO_REMOVE = ["BLOCK_K", "SPLIT_K"] + for cfg in configs: + for key in KEYS_TO_REMOVE: + del cfg.kwargs[key] + + return configs + + +def small_k_early_config_prune(configs, named_args, **kwargs): + device = torch.cuda.current_device() + capability = torch.cuda.get_device_capability() + # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages + dtsize = named_args["A"].element_size() + dtype = named_args["A"].dtype + + # 1. make sure we have enough smem + pruned_configs = [] + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, num_stages = ( + kw["BLOCK_M"], + kw["BLOCK_N"], + named_args["K"], + config.num_stages, + ) + + max_shared_memory = driver.active.utils.get_device_properties(device)[ + "max_shared_mem" + ] + required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize + if required_shared_memory <= max_shared_memory: + pruned_configs.append(config) + configs = pruned_configs + + # Some dtypes do not allow atomic_add + # if dtype not in [torch.float16, torch.float32]: + # configs = [config for config in configs if config.kwargs["SPLIT_K"] == 1] + + # group configs by (BLOCK_M,_N,_K, num_warps) + configs_map = {} + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, num_warps, num_stages = ( + kw["BLOCK_M"], + kw["BLOCK_N"], + named_args["K"], + # kw["SPLIT_K"], + config.num_warps, + config.num_stages, + ) + + key = (BLOCK_M, BLOCK_N, BLOCK_K, num_warps) + if key in configs_map: + configs_map[key].append((config, num_stages)) + else: + configs_map[key] = [(config, num_stages)] + + pruned_configs = [] + for k, v in configs_map.items(): + BLOCK_M, BLOCK_N, BLOCK_K, num_warps = k + if capability[0] >= 8: + # compute cycles (only works for ampere GPUs) + mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) + mma_cycles = mmas / min(4, num_warps) * 8 + + ldgsts_latency = 300 # Does this matter? 
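+            # Choose num_stages so that software pipelining roughly hides the
+            # assumed global-load (LDGSTS) latency, preferring larger stage counts.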
+ optimal_num_stages = ldgsts_latency / mma_cycles + + # nearest stages, prefer large #stages + nearest = heapq.nsmallest( + 2, + v, + key=lambda x: 10 + abs(x[1] - optimal_num_stages) + if (x[1] - optimal_num_stages) < 0 + else x[1] - optimal_num_stages, + ) + + for n in nearest: + pruned_configs.append(n[0]) + else: # Volta & Turing only supports num_stages <= 2 + random_config = v[0][0] + random_config.num_stages = 2 + pruned_configs.append(random_config) + return pruned_configs + + +SMALLK_HEURISTICS = { + "BLOCK_K": lambda args: args["K"], +} + +_AUTOTUNE_TOPK = 10 + + +# @heuristics(SMALLK_HEURISTICS) +@autotune( + get_small_k_configs(), + key=["M", "N", "K"], + prune_configs_by={ + "early_config_prune": small_k_early_config_prune, + "perf_model": estimate_matmul_time, + "top_k": _AUTOTUNE_TOPK, + }, +) +@triton.jit +def _mm_small_k_kernel( + A, + B, + M, + N, + K, # + stride_am, + stride_ak, # + stride_bk, + stride_bn, # + acc_dtype: tl.constexpr, # + input_precision: tl.constexpr, # + fp8_fast_accum: tl.constexpr, # + BLOCK_K: tl.constexpr, # + AB_DTYPE: tl.constexpr, # + BLOCK_M: tl.constexpr = 256, + BLOCK_N: tl.constexpr = 64, + C=None, + stride_cm=None, + stride_cn=None, # + Norm2=None, + Source=None, + stride_sourcem=None, + stride_sourcen=None, + Magnitude=None, + ADD_SOURCE: tl.constexpr = False, + EPILOGUE_NORM: tl.constexpr = False, + EPILOGUE_MAGNITUDE: tl.constexpr = False, + STORE_ACC: tl.constexpr = False, +): + pid_m = tl.program_id(0) + + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rk = tl.arange(0, BLOCK_K) + + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + a = tl.load(A) + + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=acc_dtype) + + rn = tl.arange(0, BLOCK_N) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + if STORE_ACC: + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + + if ADD_SOURCE: + Source = Source + (rm[:, None] * stride_sourcem + rn[None, :] * stride_sourcen) + + if EPILOGUE_NORM: + norm_vec = tl.zeros((BLOCK_M,), dtype=acc_dtype) + + if EPILOGUE_MAGNITUDE: + Magnitude = Magnitude + ram + + mask_m = rm < M + + for n in range(0, tl.cdiv(N, BLOCK_N)): + # Advance B over N + + b = tl.load(B) + + if AB_DTYPE is not None: + a = a.to(AB_DTYPE) + b = b.to(AB_DTYPE) + + if fp8_fast_accum: + acc = tl.dot( + a, b, acc, out_dtype=acc_dtype, input_precision=input_precision + ) + else: + acc = tl.dot(a, b, out_dtype=acc_dtype, input_precision=input_precision) + + if ADD_SOURCE: + mask_n = (n * BLOCK_N + rn < N)[None, :] + source = tl.load(Source, mask=mask_m[:, None] & mask_n) + acc += source.to(acc_dtype) + Source += BLOCK_N * stride_sourcen + + # 2-norm = tl.sqrt(tl.sum(acc * acc, axis=1)) + if EPILOGUE_NORM: + norm_vec += tl.sum(acc * acc, axis=1) + + if STORE_ACC: + mask_n = (n * BLOCK_N + rn < N)[None, :] + tl.store(C, acc.to(C.dtype.element_ty), mask=mask_m[:, None] & mask_n) + C += BLOCK_N * stride_cn + + B += BLOCK_N * stride_bn + + if EPILOGUE_NORM: + Norm2 = Norm2 + rm + norm_vec = tl.rsqrt(norm_vec).to(Norm2.dtype.element_ty) + + if EPILOGUE_MAGNITUDE: + magnitude = tl.load(Magnitude, mask=mask_m) + norm_vec *= magnitude + + tl.store(Norm2, norm_vec, mask=mask_m) + + +def triton_mm_small_k( + a: torch.Tensor, + b: torch.Tensor, + epilogue_norm: bool = True, + source: torch.Tensor = None, + magnitude: torch.Tensor = None, + store_acc: bool = False, + acc_dtype: torch.dtype = 
None,
+    input_precision: TritonInputPrecision = TritonInputPrecision.IEEE,
+    fp8_fast_accum: bool = False,
+    output_dtype: torch.dtype = None,
+):
+    """Computes GEMM for small K {16, 32, 64}
+
+    Assumes that K is small enough that the MAC loop within each block is a single iteration.
+    Instead of iterating over K, we iterate over N per block such that each block computes a BLK_M x N row of C. The kernel grid is ceildiv(M, BLOCK_M).
+
+    This specialized GEMM is primarily useful for low-rank projections and for fusing grid-wide reductions into the epilogue.
+
+    Currently, the following fusions are implemented:
+    - `epilogue_norm` - when set to True, the kernel computes the reciprocal of the 2-norm along axis=1 of AB ( `1 / 2-norm(AB, axis=1)` )
+    - `source=torch.Tensor` - when passed a tensor of shape `M x N`, the kernel computes `D = AB + source`
+    - `magnitude=torch.Tensor` - when passed a tensor of shape `M`, the kernel additionally multiplies the epilogue norm by the magnitude vector
+
+    Hence, when the above fusions are enabled, the kernel can be used to compute the DoRA layer magnitude scale: `magnitude / (base_weight + lora_B.weight @ lora_A.weight).norm(2, dim=1)`
+
+    Args:
+        a (torch.Tensor): operand A
+        b (torch.Tensor): operand B
+        source (torch.Tensor): operand C in `D = AB + C`
+        epilogue_norm (bool, optional): whether to calculate `1 / 2-norm(AB, axis=1)`
+        magnitude (torch.Tensor): vector to multiply the epilogue norm by
+        store_acc (bool): whether to store `AB`; if False, then `epilogue_norm` must be True, in which case only the `2-norm` is stored
+        acc_dtype (torch.dtype): accumulator type in the MAC loop
+        input_precision (TritonInputPrecision): precision to use for fp32 matmul
+        fp8_fast_accum (bool): whether to accumulate into `acc` within `tl.dot` (fast accumulation for fp8)
+        output_dtype (torch.dtype): type for output tensors (`D`, `2-norm`, etc.)
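+        (Return value: `C` when `store_acc=True`, together with the norm vector if
+        `epilogue_norm` is also set; otherwise only the `M`-dim norm vector.)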
+ + Returns: + torch.Tensor + """ + assert store_acc or epilogue_norm, "Must use store_acc or epilogue_norm" + + device = a.device + + # Make sure inputs are contiguous + if a.stride(0) > 1 and a.stride(1) > 1: + a = a.contiguous() + if b.stride(0) > 1 and b.stride(1) > 1: + b = b.contiguous() + + assert a.shape[1] == b.shape[0], "Incompatible operand dimensions" + M, K = a.shape + _, N = b.shape + + assert K < 128, "K must be < 128 to use this kernel" + + # common type between a and b + ab_dtype = get_higher_dtype(a.dtype, b.dtype) + + if output_dtype is None: + output_dtype = ab_dtype + + if epilogue_norm: + norm2 = torch.zeros(M, device=device, dtype=output_dtype) + + # Must set out_dtype before converting dtypes to tl types + if store_acc: + c = torch.empty((M, N), device=device, dtype=output_dtype) + + if acc_dtype is None: + acc_dtype = TRITON_SUPPORTED_ACC_TYPES[ab_dtype][0] + else: + assert isinstance(acc_dtype, torch.dtype), "acc_dtype must be a torch.dtype" + assert ( + acc_dtype in TRITON_SUPPORTED_ACC_TYPES[a.dtype] + ), "acc_dtype not compatible with the type of a" + assert ( + acc_dtype in TRITON_SUPPORTED_ACC_TYPES[b.dtype] + ), "acc_dtype not compatible with the type of b" + + # Convert dtypes to tl types + acc_dtype = to_tl_type(acc_dtype) + ab_dtype = to_tl_type(ab_dtype) + output_dtype = to_tl_type(output_dtype) + + # Use fp8 types in MAC loop + if a.dtype in [tl.float8e4nv, tl.float8e5] and b.dtype in [ + tl.float8e4nv, + tl.float8e5, + ]: + ab_dtype = None + + logger.debug( + f"triton_mm_small_k: {ab_dtype=} {acc_dtype=} {input_precision=} {fp8_fast_accum=} {output_dtype=}" + ) + + # Set the fusion and other GEMM kwargs + # IMPORTANT: BLOCK_K must be equal to K + kwargs = { + "BLOCK_K": K, + "acc_dtype": acc_dtype, + "input_precision": input_precision, + "fp8_fast_accum": fp8_fast_accum, + "AB_DTYPE": ab_dtype, + "EPILOGUE_NORM": epilogue_norm, + "ADD_SOURCE": source is not None, + "EPILOGUE_MAGNITUDE": magnitude is not None, + "STORE_ACC": store_acc, + } + + # 2-norm params + if epilogue_norm: + kwargs["Norm2"] = norm2 + + # source params + if source is not None: + assert source.shape == (M, N) + kwargs["Source"] = source + kwargs["stride_sourcem"] = source.stride(0) + kwargs["stride_sourcen"] = source.stride(1) + else: + kwargs["Source"] = None + kwargs["stride_sourcem"] = 0 + kwargs["stride_sourcen"] = 0 + + # magnitude params, epilogue_norm must be True + if magnitude is not None: + assert epilogue_norm, "magnitude requires epilogue_norm" + assert magnitude.ndim == 1 and magnitude.shape[0] == M + kwargs["Magnitude"] = magnitude + + # store_acc, whether to store the intermediate AB + if store_acc: + kwargs["C"] = c + kwargs["stride_cm"] = c.stride(0) + kwargs["stride_cn"] = c.stride(1) + else: + kwargs["C"] = None + kwargs["stride_cm"] = 0 + kwargs["stride_cn"] = 0 + + # kwargs_str = " ".join( + # f"{k}={v}" for k, v in kwargs.items() if not isinstance(v, torch.Tensor) + # ) + # print(f"triton_mm_small_k: {kwargs_str}") + + # launch kernel + grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]),) + _mm_small_k_kernel[grid]( + a, + b, + M, + N, + K, # + a.stride(0), + a.stride(1), # + b.stride(0), + b.stride(1), # + **kwargs, + ) + + if store_acc: + if epilogue_norm: + return c, norm2 + else: + return c + return norm2 From f0bdc8fb9ee5d25127187889be2c238a7a517abe Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Mon, 6 May 2024 19:31:42 -0700 Subject: [PATCH 19/61] Unified AffineQuantizedTensor subclass (#214) Summary: Creatd a `AffineQuantizedTensor` subclass that 
works for both weight and input (for dynamic quantization), for all granularities (levering the recently added choose_qparams_affine, quantize_affine and dequantize_affine ops) only verified for 8da4w right now, we can make it work for other types of quantization (mostly the operator dispatching part) later Test Plan: python test/quantization/test_quant_api.py -k test_quantized_tensor_subclass_8da4w Reviewers: Subscribers: Tasks: Tags: Co-authored-by: Mark Saroufim --- test/quantization/test_quant_api.py | 67 +++++- torchao/quantization/quant_primitives.py | 16 +- torchao/quantization/subclass.py | 278 ++++++++++++++++++++++- 3 files changed, 344 insertions(+), 17 deletions(-) diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index 93ac6fe739..10d36f0c1b 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -87,7 +87,7 @@ def quantize(self, model: torch.nn.Module) -> torch.nn.Module: apply_dynamic_quant(model) return model -class M(torch.nn.Module): +class ToyLinearModel(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(64, 32, bias=False).to(torch.float) @@ -103,7 +103,7 @@ def forward(self, x): class TestQuantFlow(unittest.TestCase): def test_dynamic_quant_gpu_singleline(self): - m = M().eval() + m = ToyLinearModel().eval() m = _apply_dynamic_quant(m) quantized = m(*m.example_inputs()) # AssertionError: Expecting input to have dtype torch.float32, but got dtype: torch.float64 @@ -116,7 +116,7 @@ def test_dynamic_quant_gpu_singleline(self): @unittest.skip("skipping for now due to torch.compile error") def test_dynamic_quant_gpu_unified_api_unified_impl(self): quantizer = XNNPackDynamicQuantizer() - m = M().eval() + m = ToyLinearModel().eval() example_inputs = m.example_inputs() m = quantizer.prepare(m) m = quantizer.convert(m) @@ -131,7 +131,7 @@ def test_dynamic_quant_gpu_unified_api_unified_impl(self): @unittest.skip("FAILED test/quantization/test_quant_api.py::TestQuantFlow::test_dynamic_quant_gpu_unified_api_eager_mode_impl - AssertionError: Tensor-likes are not equal!") def test_dynamic_quant_gpu_unified_api_eager_mode_impl(self): quantizer = TorchCompileDynamicQuantizer() - m = M().eval() + m = ToyLinearModel().eval() example_inputs = m.example_inputs() m = quantizer.quantize(m) quantized = m(*example_inputs) @@ -141,7 +141,7 @@ def test_dynamic_quant_gpu_unified_api_eager_mode_impl(self): @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_int8_wo_quant_save_load(self): - m = M().eval().cpu() + m = ToyLinearModel().eval().cpu() apply_weight_only_int8_quant(m) example_inputs = m.example_inputs() ref = m(*example_inputs) @@ -150,7 +150,7 @@ def test_int8_wo_quant_save_load(self): state_dict = torch.load(_TMP_FN) os.remove(_TMP_FN) - m2 = M().eval() + m2 = ToyLinearModel().eval() apply_weight_only_int8_quant(m2) m2.load_state_dict(state_dict) m2 = m2.to(device="cuda") @@ -165,7 +165,7 @@ def test_8da4w_quantizer(self): from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear quantizer = Int8DynActInt4WeightQuantizer(groupsize=32) - m = M().eval() + m = ToyLinearModel().eval() example_inputs = m.example_inputs() m = quantizer.quantize(m) assert isinstance(m.linear1, Int8DynActInt4WeightLinear) @@ -392,5 +392,58 @@ def test_eval_wrapper(self): f"accuracy regressed from 7.76 to {result['results']['wikitext']['word_perplexity,none']}" ) + # TODO: move to a separate test file + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled 
for 2.4+") + def test_quantized_tensor_subclass_8da4w(self): + from torchao.quantization.subclass import AffineQuantizedTensor + from torchao.quantization.quant_primitives import MappingType + import copy + + # weight settings + groupsize = 32 + mapping_type = MappingType.SYMMETRIC + block_size = (1, groupsize) + target_dtype = torch.int8 + eps = torch.finfo(torch.float32).eps + quant_min = -8 + quant_max = 7 + + # TODO: make a general helper function? + def get_per_token_block_size(x): + block_size = [] + for i in range(len(x.shape)-1): + block_size.append(1) + block_size.append(x.shape[-1]) + return block_size + + # input settings + input_mapping_type = MappingType.ASYMMETRIC + input_target_dtype = torch.int8 + input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) + + m = ToyLinearModel().eval() + m_copy = copy.deepcopy(m) + example_inputs = m.example_inputs() + m.linear1.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(m.linear1.weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, input_quant_func=input_quant_func), requires_grad=False) + m.linear2.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(m.linear2.weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, input_quant_func=input_quant_func), requires_grad=False) + assert isinstance(m.linear1.weight, AffineQuantizedTensor) + assert isinstance(m.linear2.weight, AffineQuantizedTensor) + + # reference + from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer + from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear + + quantizer = Int8DynActInt4WeightQuantizer(groupsize=groupsize) + m_copy = quantizer.quantize(m_copy) + assert isinstance(m_copy.linear1, Int8DynActInt4WeightLinear) + assert isinstance(m_copy.linear2, Int8DynActInt4WeightLinear) + + res = m(*example_inputs) + ref = m_copy(*example_inputs) + self.assertTrue(torch.equal(res, ref)) + + + + if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index 90316e1557..f59144becd 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -136,7 +136,7 @@ def _get_reduction_params(block_size, input_size): def quantize_affine( input: torch.Tensor, - block_size: List[int], + block_size: Tuple[int, ...], scale: torch.Tensor, zero_point: Optional[torch.Tensor], output_dtype: torch.dtype, @@ -146,7 +146,7 @@ def quantize_affine( """ Args: input (torch.Tensor): original float32 or bfloat16 Tensor - block_size: (List[int]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam + block_size: (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam e.g. 
when size is the same as the input tensor dimension, we are using per tensor quantization scale (float): quantization parameter for affine quantization zero_point (int): quantization parameter for affine quantization @@ -191,7 +191,7 @@ def quantize_affine( def dequantize_affine( input: torch.Tensor, - block_size: List[int], + block_size: Tuple[int, ...], scale: torch.Tensor, zero_point: Optional[torch.Tensor], input_dtype: torch.dtype, @@ -244,7 +244,7 @@ class MappingType(Enum): def choose_qparams_affine( input: torch.Tensor, mapping_type: MappingType, - block_size: List[int], + block_size: Tuple[int, ...], target_dtype: torch.dtype, quant_min: Optional[int] = None, quant_max: Optional[int] = None, @@ -256,12 +256,14 @@ def choose_qparams_affine( Args: input (torch.Tensor): fp32, bf16, fp16 input Tensor mapping_type (MappingType): determines how the qparams are calculated, symmetric or asymmetric + block_size: (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam + e.g. when size is the same as the input tensor dimension, we are using per tensor quantization target_dtype (torch.dtype): dtype for target quantized Tensor quant_min (Optional[int]): minimum quantized value for target quantized Tensor quant_max (Optioanl[int]): maximum quantized value for target quantized Tensor - eps (Optional[float]: minimum scale - scale_dtype (torch.dtype): dtype for scales - zero_point_dtype (torch.dtype): dtype for zero_points + eps (Optional[float]): minimum scale, if not provided, default to eps of input.dtype + scale_dtype (torch.dtype): dtype for scale Tensor + zero_point_dtype (torch.dtype): dtype for zero_point Tensor Output: Tuple of scales and zero_points Tensor with requested dtype diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index 7de4a6169f..148228a030 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -15,14 +15,19 @@ groupwise_affine_quantize_tensor, quant_int8_dynamic_per_token_linear, unpack_tinygemm_scales_and_zeros, + choose_qparams_affine, + quantize_affine, + dequantize_affine, ) from .utils import find_multiple +from typing import Tuple, Optional, Callable __all__ = [ "Int8DynamicallyQuantizedLinearWeight", "Int8WeightOnlyQuantizedLinearWeight", "Int4WeightOnlyQuantizedLinearWeight", + "AffineQuantizedTensor", ] @@ -134,14 +139,21 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): + # Note: we only added cpu path here for 8da4w, this is for executorch, in the future + # 1. we'll add cpu/cuda version (int4mm etc.) + # 2. we'll need to hide the 8da4w executorch version under things like layouts (we also have multiple impl for cpu kernel as Michael mentioned), so it will be something like + # cpu device + et laytout --> gives current 8da4w executorch representation + # cpu device + avx layout --> gives optimized kernel for 8da4w in avx cpu etc. 
+ # cuda device + some layout --> gives cuda kernel + # two scenarios where we currently fall back to vanilla mm: - # 1 - when tensor is on CPU: we are missing qmm for CPU, but we should have a CPU implementation - # for consistency and to allow people to test + # 1 - when tensor is on CUDA: we'll add this later, we'll also enable dispatching to optimized + # kernels in CPU as well, see the note above # 2 - we're given non-floats - quantizing long to int8 is crazy if ( func in [aten.mm.default, aten.addmm.default] and args[0].is_floating_point() - and args[0].is_cuda + and args[0].device == torch.device("cpu") ): if func == aten.addmm.default: assert args[1].shape[-1] == args[2].shape[0], ( @@ -592,3 +604,263 @@ def to_qtensor_components(cls, input_float, groupsize=128, inner_k_tiles=8): ) int_data = aten._convert_weight_to_int4pack(input_int4x8, inner_k_tiles) return int_data, scales_and_zeros, False, groupsize, inner_k_tiles + + +class AffineQuantizedTensor(torch.Tensor): + """ + Base affine quantized tensor subclass. When the from_float method is used, + to create an instance of any AffineQuantizedTensor + + The shape and dtype of the tensor subclass represent how the tensor subclass looks externally, + regardless of the internal representation's type or orientation. + + Affine quantization means we quantize the floating point tensor with an affine transformation: + quantized_tensor = float_tensor / scale + zero_point + + fields: + int_data (torch.Tensor): the quantized integer data Tensor + scale (torch.Tensor): the scale Tensor used to map between floating point tensor to quantized tensor + zero_point (torch.Tensor): the zero_point Tensor used to map between floating point tensor to quantized tensor + block_size (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam + e.g. when size is the same as the input tensor dimension, we are using per tensor quantization + shape (torch.Size): the shape for the Tensor + quant_min (Optional[int]): minimum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data` + quant_max (Optional[int]): maximum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data` + input_quant_func (Optional[Callable]): function for quantizing the input float Tensor to a quantized tensor subclass object, that takes input Tensor as input and outputs an AffineQuantizedTensor object + dtype: dtype for external representation of the tensor, e.g. 
torch.float32 + """ + + @staticmethod + def __new__( + cls, + int_data: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + block_size: Tuple[int, ...], + shape: torch.Size, + quant_min: Optional[int] = None, + quant_max: Optional[int] = None, + input_quant_func: Optional[Callable] = None, + dtype=None, + *args, + **kwargs + ): + kwargs["device"] = int_data.device + kwargs["layout"] = ( + kwargs.get("layout") if kwargs.get("layout", False) else int_data.layout + ) + if dtype is None: + dtype = scale.dtype + kwargs["dtype"] = dtype + assert not kwargs.get("requires_grad", False) + kwargs["requires_grad"] = False + return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] + + def __init__( + self, + int_data: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + block_size: Tuple[int, ...], + shape: torch.Size, + quant_min: Optional[int] = None, + quant_max: Optional[int] = None, + input_quant_func: Optional[Callable] = None, + dtype=None, + *args, + **kwargs + ): + self.int_data = int_data + self.scale = scale + self.zero_point = zero_point + self.block_size = block_size + self.quant_min = quant_min + self.quant_max = quant_max + self.input_quant_func = input_quant_func + + def __repr__(self): + return ( + f"{self.__class__.__name__}(data={self.dequantize()}, shape={self.shape}, " + f"device={self.device}, dtype={self.dtype}, input_quant_func={self.input_quant_func}, requires_grad={self.requires_grad})" + ) + + def dequantize(self, output_dtype=torch.float32): + return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, output_dtype=output_dtype) + + def __tensor_flatten__(self): + return ["int_data", "scales", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.input_quant_func, self.dtype] + + @classmethod + def __tensor_unflatten__( + cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride + ): + int_data, scale, zero_point = tensor_data_dict["int_data"], tensor_data_dict["scale"], tensor_data_dict["zero_point"] + block_size, shape, quant_min, quant_max, input_quant_func, dtype = tensor_attributes + return cls( + int_data, + scale, + zero_point, + block_size, + shape if outer_size is None else outer_size, + quant_min, + quant_max, + input_quant_func=input_quant_func, + dtype=dtype, + strides=outer_stride, + ) + + @classmethod + def from_float( + cls, + input_float, + mapping_type, + block_size, + target_dtype, + quant_min = None, + quant_max = None, + eps = None, + scale_dtype = None, + zero_point_dtype = None, + input_quant_func = None, + ): + scale, zero_point = choose_qparams_affine(input_float, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, scale_dtype, zero_point_dtype) + int_data = quantize_affine(input_float, block_size, scale, zero_point, target_dtype, quant_min, quant_max) + return cls( + int_data, + scale, + zero_point, + block_size, + input_float.shape, + quant_min, + quant_max, + input_quant_func=input_quant_func, + dtype=input_float.dtype + ) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + kwargs = {} if kwargs is None else kwargs + + if func is torch.nn.functional.linear: + input_tensor, weight_qtensor, bias = ( + args[0], + args[1], + args[2] if len(args) > 2 else None, + ) + if weight_qtensor.input_quant_func is not None: + input_tensor = weight_qtensor.input_quant_func(input_tensor) + input_tensor = input_tensor.dequantize() + 
weight_tensor = weight_qtensor.dequantize() + return torch.nn.functional.linear(input_tensor, weight_tensor, bias) + + try: + with torch._C.DisableTorchFunctionSubclass(): + return func(*args, **kwargs) + except: + print(f"ERR: subclass doesn't implement {func}") + + + def _get_to_kwargs(self, *args, **kwargs): + device, dtype, _, memory_format = torch._C._nn._parse_to(*args, **kwargs) + device = self.device if device is None else device + dtype = self.dtype if dtype is None else dtype + memory_format = ( + memory_format if memory_format is not None else torch.preserve_format + ) + kwargs = { + "device": device, + "dtype": dtype, + "memory_format": memory_format, + } + return kwargs + + def to(self, *args, **kwargs): + kwargs = self._get_to_kwargs(*args, **kwargs) + return self.__class__( + self.int_data.to(kwargs["device"]), + self.scale.to(kwargs["device"]), + self.zero_point.to(kwargs["device"]), + self.block_size, + self.shape, + self.quant_min, + self.quant_max, + self.input_quant_func, + **kwargs, + ) + + def _apply_fn_to_data(self, fn): + return self.__class__( + fn(self.int_data), + fn(self.scale), + fn(self.zero_point), + self.block_size, + self.shape, + self.quant_min, + self.quant_max, + self.input_quant_func, + dtype=self.dtype, + ) + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs): + # two scenarios where we currently fall back to vanilla mm: + # 1 - when tensor is on CPU: we are missing qmm for CPU, but we should have a CPU implementation + # for consistency and to allow people to test + # 2 - we're given non-floats - quantizing long to int8 is crazy + if ( + func in [aten.mm.default, aten.addmm.default] + and args[0].is_floating_point() + and args[0].is_cuda + ): + if func == aten.addmm.default: + assert args[1].shape[-1] == args[2].shape[0], ( + f"need mat1 shape: {args[1].shape} final" + f"dim to match mat2 shape: {args[2].shape} first dim " + ) + input_tensor, weight_qtensor, bias = ( + args[1], + args[2], + args[0], + ) + else: + assert args[0].shape[-1] == args[1].shape[0], ( + f"need mat1 shape: {args[0].shape} final dim" + f"to match mat2 shape: {args[1].shape} first dim" + ) + input_tensor, weight_qtensor, bias = ( + args[0], + args[1], + None if len(args) == 2 else args[2], + ) + if weight_qtensor.input_quant_func is not None: + input_tensor = weight_qtensor.input_quant_func(input_tensor) + input_tensor = input_tensor.dequantize() + weight_tensor = weight_qtensor.dequantize() + return func(input_tensor, weight_tensor, bias) + + if (func is aten.detach.default or + func is aten.clone.default or + func is aten._to_copy.default): + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) + ) + + if func is aten.clone.default: + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) + ) + + if func is aten.t.default: + # TODO: need to implement this + # args[0].transposed = not args[0].transposed + # new = args[0]._change_shape(args[0].shape[::-1]) + # return return_and_correct_aliasing(func, args, kwargs, new) + raise Exception("transpose not implemented yet") + + if func is aten._to_copy.default: + return return_and_correct_aliasing( + func, + args, + kwargs, + args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), + ) From cce59609220cdbe5714a9666c9af106fe7fa0d66 Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Tue, 7 May 2024 10:18:21 -0700 Subject: [PATCH 20/61] add expecttest to requirements.txt (#225) * add expecttest to requirements.txt * update 
--- dev-requirements.txt | 3 +-- requirements.txt | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 76a984d939..6dadb274aa 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,6 +1,5 @@ # Test utilities pytest==7.4.0 -expecttest unittest-xml-reporting parameterized packaging @@ -9,7 +8,7 @@ transformers # For prototype features and benchmarks bitsandbytes #needed for testing triton quant / dequant ops for 8-bit optimizers matplotlib -pandas +pandas # Custom CUDA Extensions ninja diff --git a/requirements.txt b/requirements.txt index 1420797da0..0e6e860a5a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,5 @@ torch numpy sentencepiece packaging +expecttest # So we can use IS_FBCODE flag +hypothesis # Avoid test derandomization warning From b34d1acaa9bd119c4b721dbc4a730f6c3a1cd6c9 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 7 May 2024 11:06:25 -0700 Subject: [PATCH 21/61] Install dev-requirements.txt in doc build (#224) Install dev-requirements.txt --------- Co-authored-by: Mark Saroufim --- .github/workflows/doc_build.yml | 1 + docs/source/api_ref_sparsity.rst | 6 +++++- docs/source/index.rst | 4 ++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/doc_build.yml b/.github/workflows/doc_build.yml index 2903e9cf81..125fa83e52 100644 --- a/.github/workflows/doc_build.yml +++ b/.github/workflows/doc_build.yml @@ -41,6 +41,7 @@ jobs: run: | python -m pip install torch python -m pip install -e . + pip install -r dev-requirements.txt cd docs python -m pip install -r requirements.txt - name: Build docs diff --git a/docs/source/api_ref_sparsity.rst b/docs/source/api_ref_sparsity.rst index 9417d9befb..8023d0bacc 100644 --- a/docs/source/api_ref_sparsity.rst +++ b/docs/source/api_ref_sparsity.rst @@ -3,7 +3,7 @@ ================ torchao.sparsity ================ - +.. automodule:: torchao.sparsity .. currentmodule:: torchao.sparsity .. autosummary:: @@ -12,3 +12,7 @@ torchao.sparsity WandaSparsifier PerChannelNormObserver + apply_sparse_semi_structured + apply_fake_sparsity + + diff --git a/docs/source/index.rst b/docs/source/index.rst index fb1649fa48..cea80772d8 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -92,9 +92,9 @@ with more content coming soon. :maxdepth: 1 :caption: API Reference - api_ref_intro api_ref_sparsity + api_ref_intro api_ref_quantization api_ref_dtypes - .. +.. api_ref_kernel From 98493600580da5c63213d3642934a388de34c097 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Tue, 7 May 2024 13:46:00 -0700 Subject: [PATCH 22/61] Fix an error in subclass impl (#226) Summary: Accidently changed the device check code for old subclass instead of the new one, forgot to fix before landing Test Plan: CI Reviewers: Subscribers: Tasks: Tags: --- torchao/quantization/subclass.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index 148228a030..6128720d4d 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -139,21 +139,14 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): - # Note: we only added cpu path here for 8da4w, this is for executorch, in the future - # 1. we'll add cpu/cuda version (int4mm etc.) - # 2. 
we'll need to hide the 8da4w executorch version under things like layouts (we also have multiple impl for cpu kernel as Michael mentioned), so it will be something like - # cpu device + et laytout --> gives current 8da4w executorch representation - # cpu device + avx layout --> gives optimized kernel for 8da4w in avx cpu etc. - # cuda device + some layout --> gives cuda kernel - # two scenarios where we currently fall back to vanilla mm: - # 1 - when tensor is on CUDA: we'll add this later, we'll also enable dispatching to optimized - # kernels in CPU as well, see the note above + # 1 - when tensor is on CPU: we are missing qmm for CPU, but we should have a CPU implementation + # for consistency and to allow people to test # 2 - we're given non-floats - quantizing long to int8 is crazy if ( func in [aten.mm.default, aten.addmm.default] and args[0].is_floating_point() - and args[0].device == torch.device("cpu") + and args[0].is_cuda ): if func == aten.addmm.default: assert args[1].shape[-1] == args[2].shape[0], ( @@ -803,14 +796,21 @@ def _apply_fn_to_data(self, fn): @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): + # Note: we only added cpu path here for 8da4w, this is for executorch, in the future + # 1. we'll add cpu/cuda version (int4mm etc.) + # 2. we'll need to hide the 8da4w executorch version under things like layouts (we also have multiple impl for cpu kernel as Michael mentioned), so it will be something like + # cpu device + et laytout --> gives current 8da4w executorch representation + # cpu device + avx layout --> gives optimized kernel for 8da4w in avx cpu etc. + # cuda device + some layout --> gives cuda kernel + # two scenarios where we currently fall back to vanilla mm: - # 1 - when tensor is on CPU: we are missing qmm for CPU, but we should have a CPU implementation - # for consistency and to allow people to test + # 1 - when tensor is on CUDA: we'll add this later, we'll also enable dispatching to optimized + # kernels in CPU as well, see the note above # 2 - we're given non-floats - quantizing long to int8 is crazy if ( func in [aten.mm.default, aten.addmm.default] and args[0].is_floating_point() - and args[0].is_cuda + and args[0].device == torch.device("cpu") ): if func == aten.addmm.default: assert args[1].shape[-1] == args[2].shape[0], ( @@ -833,6 +833,7 @@ def __torch_dispatch__(cls, func, types, args, kwargs): None if len(args) == 2 else args[2], ) if weight_qtensor.input_quant_func is not None: + # dynamic quantization input_tensor = weight_qtensor.input_quant_func(input_tensor) input_tensor = input_tensor.dequantize() weight_tensor = weight_qtensor.dequantize() From 63c5ac5444fc19b208dfc08f31cb3b34145cf77a Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Wed, 8 May 2024 10:13:37 -0700 Subject: [PATCH 23/61] Some follow up fixes for quant primitives (#220) Summary: att Test Plan: python test/quantization/test_quant_primitives.py -k test_raises Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_primitives.py | 19 +++++++++++++++++++ torchao/quantization/quant_primitives.py | 17 +++++++++++------ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py index 90fd8f8bf0..87c12c60d5 100644 --- a/test/quantization/test_quant_primitives.py +++ b/test/quantization/test_quant_primitives.py @@ -279,5 +279,24 @@ def test_get_group_qparams_symmetric_memory(self): after_choose_qparams_mem_use = torch.cuda.memory_allocated() 
self.assertTrue(after_choose_qparams_mem_use < 1.2 * original_mem_use) + def test_raises(self): + """Make sure some errors are raised when user requested an unsupported type of quantization + """ + input = torch.randn(10, 10) + mapping_type = MappingType.ASYMMETRIC + dtype = torch.int8 + block_size = (10, 10) + scale, zero_point = choose_qparams_affine(input, mapping_type, block_size, dtype) + + + # make sure we can't quantize int32 tensors: + with self.assertRaisesRegex(AssertionError, "Unsupported input dtype:"): + _ = quantize_affine(input.to(torch.int32), block_size, scale, zero_point, dtype) + + # block_size and scale/zero_point shape mismatch + block_size = (1, 1) + with self.assertRaisesRegex(RuntimeError, "is invalid for input of size 1"): + _ = quantize_affine(input, block_size, scale, zero_point, dtype) + if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index f59144becd..b435d5a893 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -145,9 +145,9 @@ def quantize_affine( ): """ Args: - input (torch.Tensor): original float32 or bfloat16 Tensor + input (torch.Tensor): original float32, float16 or bfloat16 Tensor block_size: (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam - e.g. when size is the same as the input tensor dimension, we are using per tensor quantization + e.g. when size is the same as the input tensor dimension, we are using per tensor quantization scale (float): quantization parameter for affine quantization zero_point (int): quantization parameter for affine quantization output_dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor @@ -171,6 +171,8 @@ def quantize_affine( quantized tensor with requested dtype """ # TODO: validations + # TODO: validate scale/zero_point dimensions are compatible with block_size + assert input.dtype in [torch.float32, torch.float16, torch.bfloat16], f"Unsupported input dtype: {input.dtype}" quant_min, quant_max = _get_and_check_qmin_qmax(output_dtype, quant_min, quant_max) shape_for_reduction, reduction_dims = _get_reduction_params(block_size, input.size()) original_shape = input.shape @@ -198,7 +200,7 @@ def dequantize_affine( quant_min: Optional[int] = None, quant_max: Optional[int] = None, *, - output_dtype: Optional[torch.dtype] = None, + output_dtype: torch.dtype = torch.float32, ): """ Args: @@ -210,13 +212,15 @@ def dequantize_affine( dtype (torch.dtype): requested dtype (e.g. 
torch.uint8) for output Tensor quant_min (Optional[int]): minimum quantized value for input Tensor quant_max (Optional[int]): maximum quantized value for input Tensor - output_dtype (torch.dtype?): optional dtype for output Tensor, default is fp32 + output_dtype (torch.dtype): dtype for output Tensor, default is fp32 Output: dequantized Tensor, with requested dtype or fp32 """ # TODO: validations + # TODO: validate scale/zero_point dimensions are compatible with block_size assert input.dtype == input_dtype + assert output_dtype in [torch.float32, torch.float16, torch.bfloat16], f"Unsupported output dtype: {output_dtype}" quant_min, quant_max = _get_and_check_qmin_qmax(input_dtype, quant_min, quant_max) shape_for_reduction, reduction_dims = _get_reduction_params(block_size, input.size()) @@ -229,9 +233,10 @@ def dequantize_affine( if zero_point is not None: zero_point = zero_point.view(shape_after_reduction) - dequant = input.to(output_dtype) + dequant = input.to(torch.int32) if zero_point is not None: - dequant -= zero_point + dequant -= zero_point.to(torch.int32) + dequant = dequant.to(output_dtype) dequant *= scale dequant = dequant.view(original_shape) return dequant.to(output_dtype) From f6d56ca1094c2dc24d9f885227aa66c84d0ddf13 Mon Sep 17 00:00:00 2001 From: HDCharles <39544797+HDCharles@users.noreply.github.com> Date: Wed, 8 May 2024 16:13:38 -0400 Subject: [PATCH 24/61] Composing autoquant with compile (#175) * Composing autoquant with compile Summary: this PR rewrites torchao.autoquant so that it works with torch.compile. Previously you had to do: torchao.autoquant(model, input) mod = torch.compile(model) mod(input) now you can do torchao.autoquant(torch.compile(model)) model(input) The new method works with/without compile. This is also BC, so the old path still works. We use a forward pre-hook to intercept the model call before torch.compile tracing occurs, at which point we do the autoquantization and clean up all remaining hooks before passing things off to the normal torch.compile tracing functionality. Note: in the case of multiple inputs, you can also do: model.forward_log_only(input) to run the model forward with autoquant shape logging and prevent the torch.compile tracing/autoquant quantization from occurring. 
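As a concrete sketch of the new flow (the toy model here is illustrative, mirroring the README example updated later in this patch; `other_input` stands in for any additional example input):

    import torch
    import torchao

    model = torch.nn.Sequential(torch.nn.Linear(32, 64)).cuda().to(torch.bfloat16)
    input = torch.randn(32, 32, dtype=torch.bfloat16, device='cuda')

    # wrap the compiled model; autoquant intercepts the first call, picks
    # quantized kernels for the observed shape, then lets compile trace as usual
    q_model = torchao.autoquant(torch.compile(model, mode='max-autotune'))

    # optional, for extra input shapes: log shapes without triggering
    # the quantization/tracing step
    # q_model.forward_log_only(other_input)

    out = q_model(input)
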
Test Plan: python test/integration/test_integration.py -k "autoquant" Reviewers: Subscribers: Tasks: Tags: * Fused DoRA kernels (#216) * add dora kernels * allowing error_on_unseen in autoquant func Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * Unified AffineQuantizedTensor subclass (#214) Summary: Created an `AffineQuantizedTensor` subclass that works for both weight and input (for dynamic quantization), for all granularities (leveraging the recently added choose_qparams_affine, quantize_affine and dequantize_affine ops). Only verified for 8da4w right now; we can make it work for other types of quantization (mostly the operator dispatching part) later. Test Plan: python test/quantization/test_quant_api.py -k test_quantized_tensor_subclass_8da4w Reviewers: Subscribers: Tasks: Tags: Co-authored-by: Mark Saroufim * add expecttest to requirements.txt (#225) * add expecttest to requirements.txt * update * Install dev-requirements.txt in doc build (#224) Install dev-requirements.txt --------- Co-authored-by: Mark Saroufim * Fix an error in subclass impl (#226) Summary: Accidentally changed the device check code for the old subclass instead of the new one; forgot to fix before landing Test Plan: CI Reviewers: Subscribers: Tasks: Tags: * update readme.md Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * trying to fix the error in CI on cleanup hooks Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * correct docs Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * Some follow up fixes for quant primitives (#220) Summary: att Test Plan: python test/quantization/test_quant_primitives.py -k test_raises Reviewers: Subscribers: Tasks: Tags: * Composing autoquant with compile Summary: this PR rewrites torchao.autoquant so that it works with torch.compile. Previously you had to do: torchao.autoquant(model, input) mod = torch.compile(model) mod(input) now you can do torchao.autoquant(torch.compile(model)) model(input) The new method works with/without compile. This is also BC, so the old path still works. We use a forward pre-hook to intercept the model call before torch.compile tracing occurs, at which point we do the autoquantization and clean up all remaining hooks before passing things off to the normal torch.compile tracing functionality. Note: in the case of multiple inputs, you can also do: model.forward_log_only(input) to run the model forward with autoquant shape logging and prevent the torch.compile tracing/autoquant quantization from occurring. 
Test Plan: python test/integration/test_integration.py -k "autoquant" Reviewers: Subscribers: Tasks: Tags: * allowing error_on_unseen in autoquant func Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * update readme.md Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * trying to fix the error in CI on cleanup hooks Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: * correct docs Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: --------- Co-authored-by: jeromeku Co-authored-by: Jerry Zhang Co-authored-by: Mark Saroufim Co-authored-by: Svetlana Karslioglu --- README.md | 9 ++-- test/integration/test_integration.py | 63 +++++++++++++++++++--- torchao/quantization/README.md | 10 ++-- torchao/quantization/autoquant.py | 80 +++++++++++++++++++++++----- torchao/quantization/quant_api.py | 4 +- 5 files changed, 133 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 80f4a932d5..21a7195c27 100644 --- a/README.md +++ b/README.md @@ -44,12 +44,9 @@ torch._inductor.config.use_mixed_mm = True model = torch.nn.Sequential(torch.nn.Linear(32, 64)).cuda().to(torch.bfloat16) input = torch.randn(32,32, dtype=torch.bfloat16, device='cuda') -# perform autoquantization -torchao.autoquant(model, (input)) - -# compile the model to recover performance -model = torch.compile(model, mode='max-autotune') -model(input) +# perform autoquantization and compilation +q_model = torchao.autoquant(torch.compile(model, mode='max-autotune')) +q_model(input) ``` ### Sparsity diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index 0d11093fd1..e6da3e7340 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -1388,7 +1388,7 @@ def test_autoquant_one_input(self, device, dtype, m, k, n): torch.nn.ReLU(), ).to(device).to(dtype) out = model(example_input) - torchao.autoquant(model, example_input) + torchao.autoquant(model) out2 = model(example_input) sqnr = SQNR(out, out2) self.assertTrue(sqnr >= 30) @@ -1400,7 +1400,9 @@ def test_autoquant_one_input(self, device, dtype, m, k, n): (32, 32, 128, 128), ])) @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "autoquant requires 2.3+.") - def test_autoquant_multi_input(self, device, dtype, m1, m2, k, n): + def test_autoquant_compile(self, device, dtype, m1, m2, k, n): + if device != "cuda" and dtype != torch.bfloat16: + self.skipTest(f"autoquant currently does not support {device}") if device != "cuda" or not torch.cuda.is_available(): self.skipTest(f"autoquant currently does not support {device}") if torch.cuda.is_available() and torch.cuda.get_device_capability() < (8, 0): @@ -1414,15 +1416,60 @@ def test_autoquant_multi_input(self, device, dtype, m1, m2, k, n): torch.nn.ReLU(), ).to(device).to(dtype) example_input = torch.randn(m1, k, device=device, dtype=dtype) - example_input2 = torch.randn(m2, k, device=device, dtype=dtype) - torchao.quantization.change_linears_to_autoquantizable(model) - out=model(example_input) - model(example_input2) - torchao.quantization.change_autoquantizable_to_quantized(model) - out2 = model(example_input) + example_input2 = torch.randn(m1, k, device=device, dtype=dtype) + out = model(example_input) + + mod = torchao.autoquant(torch.compile(model)) + mod.forward_log_only(example_input) + mod(example_input2) + + out2 = mod(example_input) sqnr = SQNR(out, out2) self.assertTrue(sqnr >= 30) + @parameterized.expand(combine_parameters(COMMON_DEVICE_DTYPE, + [ + (1, 1, 128, 128), + (1, 32, 128, 128), + (32, 32, 128, 128), + ])) + 
@unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "autoquant requires 2.3+.") + def test_autoquant_kwargs(self, device, dtype, m1, m2, k, n): + if device != "cuda" and dtype != torch.bfloat16: + self.skipTest(f"autoquant currently does not support {device}") + if device != "cuda" or not torch.cuda.is_available(): + self.skipTest(f"autoquant currently does not support {device}") + if torch.cuda.is_available() and torch.cuda.get_device_capability() < (8, 0): + if dtype == torch.bfloat16: + self.skipTest(f"bfloat16 requires sm80+") + if m1 == 1 or m2 == 1: + self.skipTest(f"Shape {(m1, m2, k, n)} requires sm80+") + + class NeedsKwargs(torch.nn.Module): + def __init__(self): + super().__init__() + self.rel = torch.nn.ReLU() + self.lin = torch.nn.Linear(k,n) + + def forward(self, x, y): + x = self.rel(x) + z = self.lin(x + y) + return z + + model = NeedsKwargs().to(device).to(dtype) + example_input = { + "x": torch.randn(m1, k, device=device, dtype=dtype), + "y": torch.randn(m1, k, device=device, dtype=dtype), + } + out = model(**example_input) + + mod = torchao.autoquant(torch.compile(model)) + mod.forward_log_only(**example_input) + mod(**example_input) + + out2 = mod(**example_input) + sqnr = SQNR(out, out2) + self.assertTrue(sqnr >= 30) class TestAOTI(unittest.TestCase): @parameterized.expand( diff --git a/torchao/quantization/README.md b/torchao/quantization/README.md index fc8dbf0137..622ec1cbcf 100644 --- a/torchao/quantization/README.md +++ b/torchao/quantization/README.md @@ -28,11 +28,11 @@ torch._inductor.config.use_mixed_mm = True model = torch.nn.Sequential(torch.nn.Linear(32, 64)).cuda().to(torch.bfloat16) input = torch.randn(32,32, dtype=torch.bfloat16, device='cuda') -# perform autoquantization -torchao.autoquant(model, (input)) +# perform autoquantization and torch.compile +model = torchao.autoquant(torch.compile(model, mode='max-autotune')) -# compile the model to improve performance -model = torch.compile(model, mode='max-autotune') +# pass in an input which is used in order to pick fastest quantization operations +# and apply torch compilation. model(input) ``` @@ -167,6 +167,6 @@ model(input) ## Notes -1. APIs have been hardware tested on A100 and T4(colab) +1. APIs have been hardware tested on A100 and T4(colab) 2. While these techniques are designed to improve model performance, in some cases the opposite can occur. This is because quantization adds additional overhead to the model that is hopefully made up for by faster matmuls (dynamic quantization) or loading weights faster (weight-only quantization). If your matmuls are small enough or your non-quantized perf isn't bottlenecked by weight load time, these techniques may reduce performance. 3. Use the PyTorch nightlies so you can leverage [tensor subclasses](https://pytorch.org/docs/stable/notes/extending.html#subclassing-torch-tensor) which is preferred over older module swap based methods because it doesn't modify the graph and is generally more composable and flexible. 
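The autoquant.py diff that follows implements this interception with a forward pre-hook that deregisters itself after the first call. A minimal standalone sketch of that pattern, with an illustrative module (none of these names are torchao APIs):

    import torch

    model = torch.nn.Linear(4, 4)

    def one_shot_prehook(module, args, kwargs):
        # one-time work happens here; autoquant swaps the autoquantizable
        # weights for concrete quantized subclasses at this point
        handle.remove()  # deregister so later calls run the module directly
        return args, kwargs

    handle = model.register_forward_pre_hook(one_shot_prehook, with_kwargs=True)
    model(torch.randn(2, 4))  # first call fires the hook, then forward runs
    model(torch.randn(2, 4))  # subsequent calls skip the hook entirely
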
diff --git a/torchao/quantization/autoquant.py b/torchao/quantization/autoquant.py index 9f2b59f20a..fc38c04169 100644 --- a/torchao/quantization/autoquant.py +++ b/torchao/quantization/autoquant.py @@ -74,6 +74,7 @@ def tune_autoquant(self, q_cls, shapes_and_dtype, best_time): res = q_cls._autoquant_test(act_mat, self.weight, bias, best_time, self.mode) update_cache(q_cls, shapes_and_dtype, res) + @torch.no_grad() def to_quantized(self, error_on_unseen, **kwargs): if error_on_unseen and self.logged_data == {}: raise RuntimeError("must run module normally to get shape, dtype info for autoquant") @@ -123,7 +124,7 @@ def count_shapes(self, do_print=True): torch._dynamo.reset() cur_time += check_cache(q_cls, shapes_and_dtype) * times_seen if shape_count is not None and shape_count > 1: - print(f">total_time: {cur_time:0.3f}ms for {q_cls}, prev_best: {best_time:0.3f}ms") + print(f">time (all shapes): {cur_time:0.3f}ms for {q_cls}, prev_best: {best_time:0.3f}ms") if best_time >= cur_time: best_time = cur_time best_cls = q_cls @@ -176,6 +177,7 @@ def __torch_dispatch__(cls, func, types, args, kwargs): if func is aten.detach.default: return return_and_correct_aliasing(func, args, kwargs, args[0]._apply_fn_to_data(torch.detach)) +@torch.no_grad() def do_autoquant_bench(op, *args, **kwargs): """ runs benchmark op(*args, **kwargs) avoiding torch.compile overhead @@ -335,6 +337,7 @@ def change_linears_to_autoquantizable(model, **kwargs): """ from torchao.quantization.quant_api import _is_linear filter_fn = kwargs.pop("filter_fn", _is_linear) + _ = kwargs.pop("error_on_unseen", True) # same kwargs used for this and to_quantized kwargs["qtensor_class_list"] = kwargs.get("qtensor_class_list", DEFAULT_CLASS_LIST) kwargs["mode"] = kwargs.get("mode", ["relu", None]) from torchao.quantization.quant_api import _replace_with_custom_fn_if_matches_filter @@ -374,20 +377,71 @@ def change_autoquantizable_to_quantized(model, **kwargs): torch._dynamo.reset() @torch.no_grad() -def autoquant(model, example_input, qtensor_class_list=DEFAULT_CLASS_LIST, filter_fn=None, mode=["relu",None], **kwargs): +def autoquant(model, example_input=None, qtensor_class_list=DEFAULT_CLASS_LIST, filter_fn=None, mode=["relu",None], **aq_kwargs): """ - Runs the model with example_input to record shapes and then compares benchmark performance of the seen shape - across the qtensor subclasses in qtensor_class_list. Determines best performing qtensor subclass for each layer - and applies that type of quantization. + wraps model in AutoQuantWrapper, if example_input is provided, runs forward on it, otherwise returns the wrapped model. + AutoQuantWrapper handles instances where model is torch.compiled by first performing autoquantization on the original + model and then letting the torch.compile run/tracing occur. 
+ + Example usage:: + + torchao.autoquant(torch.compile(model)) + model(*example_input) + """ - if filter_fn is None: - from torchao.quantization.quant_api import _is_linear - filter_fn = _is_linear + # the hook we will use to intercept the model forward and perform + # autoquantization + def autoquant_prehook(module, args, kwargs): + module.forward_log_only(*args, **kwargs) + change_autoquantizable_to_quantized( + module, + **aq_kwargs, + ) + module.clean_up_autoquant_hooks_and_attrs() + return args, kwargs + + # perform initial swap from linear weights + # to AutoQuantizableLinearWeight + change_linears_to_autoquantizable( + model, + filter_fn=filter_fn, + qtensor_class_list=qtensor_class_list, + mode=mode, + **aq_kwargs + ) + + # access actual model of torch.compile wrapper if needed + if isinstance(model, torch._dynamo.eval_frame.OptimizedModule): + real_model = model._orig_mod + else: + real_model = model + + # we need a consistent way to run the model which bypasses both + # A) the torch.compile tracing (so we need to run the inner model directly) + # B) the autoquant_prehook we're about to register (so we call forward directly) + model.forward_log_only = lambda *args, **kwargs: real_model.forward(*args, **kwargs) + + # the autoquant_prehook intercepts the forward call and performs autoquantization + # and then deletes the hook. if model is a torch.compile wrapper, it then + # does the tracing/compile since the prehook is naturally followed by the normal. + # model run. + handle = model.register_forward_pre_hook(autoquant_prehook, with_kwargs=True) + + # note the torch.compile wrapper eval_frame moved the assignment of any assigned + # attributes to the inner model, so we have to call delattr on the inner model + def clean_up_autoquant_hooks_and_attrs(): + try: + handle.remove() + delattr(real_model, "clean_up_autoquant_hooks_and_attrs") + delattr(real_model, "forward_log_only") + except: + pass + model.clean_up_autoquant_hooks_and_attrs = clean_up_autoquant_hooks_and_attrs - change_linears_to_autoquantizable(model, filter_fn=filter_fn, qtensor_class_list=qtensor_class_list, mode=mode, **kwargs) - if not isinstance(example_input, (tuple, list)): - assert isinstance(example_input, torch.Tensor) + # if example input was provided, check it and run it + if isinstance(example_input, torch.Tensor): example_input = [example_input] - model(*example_input) - change_autoquantizable_to_quantized(model, **kwargs) + if isinstance(example_input, (tuple, list)): + model(*example_input) + return model diff --git a/torchao/quantization/quant_api.py b/torchao/quantization/quant_api.py index 2dcd935912..a5a3a2b3db 100644 --- a/torchao/quantization/quant_api.py +++ b/torchao/quantization/quant_api.py @@ -34,6 +34,7 @@ Int4WeightOnlyGPTQQuantizer, Int4WeightOnlyQuantizer, ) +from .autoquant import autoquant __all__ = [ @@ -46,7 +47,8 @@ "Quantizer", "TwoStepQuantizer", "Int4WeightOnlyGPTQQuantizer", - "Int4WeightOnlyQuantizer" + "Int4WeightOnlyQuantizer", + "autoquant" ] if TORCH_VERSION_AFTER_2_3: From b91b6be24afd1220331790ff0866f5b091165cd5 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Wed, 8 May 2024 19:46:45 -0700 Subject: [PATCH 25/61] Add more options in choose_qparams_affine for tinygemm op (#227) Summary: This is in preparation for replacing tinygemm q/dq ops with the unified quant primitive ops Test Plan: python test/quantization/test_quant_primitives.py -k test_tinygemm_get_groupwise_affine_qparams Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_primitives.py | 73 
++++++++++++++++++++-- torchao/quantization/quant_primitives.py | 32 ++++++++-- 2 files changed, 95 insertions(+), 10 deletions(-) diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py index 87c12c60d5..291039e42a 100644 --- a/test/quantization/test_quant_primitives.py +++ b/test/quantization/test_quant_primitives.py @@ -10,6 +10,7 @@ import torch from torchao.quantization.quant_primitives import ( get_group_qparams_symmetric, + get_groupwise_affine_qparams, quantize_affine, dequantize_affine, choose_qparams_affine, @@ -56,8 +57,8 @@ def test_get_group_qparams_symmetric(self): scale_obs = scale_obs.reshape(weight.shape[0], -1) # assert that scales are identical - (scale_ao, _) = get_group_qparams_symmetric(weight, n_bit, groupsize) - torch.testing.assert_allclose(scale_obs, scale_ao, rtol=0, atol=0) + (scale_ao, _) = get_group_qparams_symmetric(weight, n_bit, groupsize, precision=torch.float16) + torch.testing.assert_close(scale_obs, scale_ao, rtol=0, atol=0) def test_choose_qparams_group_sym(self): """Note: groupwise asymmetric quant is using a different way of computing zero_points, so @@ -88,7 +89,7 @@ def test_choose_qparams_token_asym(self): scale_ref = scale_ref.squeeze() zp_ref = zp_ref.squeeze() - torch.testing.assert_allclose(scale, scale_ref, atol=10e-3, rtol=10e-3) + torch.testing.assert_close(scale, scale_ref, atol=10e-3, rtol=10e-3) self.assertTrue(torch.equal(zero_point, zp_ref)) def test_choose_qparams_tensor_asym(self): @@ -257,7 +258,7 @@ def test_quantize_dequantize_channel_asym_4d_multi_dim_reduction(self): quantized = quantize_affine(input, block_size, scale, zero_point, dtype) dequantized = dequantize_affine(quantized, block_size, scale, zero_point, dtype, output_dtype=torch.float32) # we don't have corresponding ops in existing primitives, so just make sure it runs and it's close to float - torch.testing.assert_allclose(dequantized, input, rtol=2, atol=0.02) + torch.testing.assert_close(dequantized, input, rtol=2, atol=0.02) def test_choose_qparams_tensor_asym_eps(self): input = torch.zeros(10, 10) @@ -298,5 +299,69 @@ def test_raises(self): with self.assertRaisesRegex(RuntimeError, "is invalid for input of size 1"): _ = quantize_affine(input, block_size, scale, zero_point, dtype) + def test_not_preserve_zero_not_supported(self): + """Making sure preserve_zero == False is not supported for symmetric quant""" + input = torch.randn(10, 256) + n_bit = 4 + mapping_type = MappingType.SYMMETRIC + dtype = torch.int8 + block_size = (1, 128) + quant_min = 0 + quant_max = 2**n_bit - 1 + eps = 1e-6 + scale_dtype = torch.bfloat16 + zero_point_dtype = torch.bfloat16 + with self.assertRaisesRegex(ValueError, "preserve_zero == False is not supported for symmetric quantization"): + choose_qparams_affine( + input, + mapping_type, + block_size, + dtype, + quant_min, + quant_max, + eps, + scale_dtype=scale_dtype, + zero_point_dtype=zero_point_dtype, + preserve_zero=False, + ) + + + def test_tinygemm_get_groupwise_affine_qparams(self): + input = torch.randn(10, 256) + n_bit = 4 + scale_ref, zero_point_ref = get_groupwise_affine_qparams(input, n_bit=n_bit, groupsize=128, dtype=torch.bfloat16) + + mapping_type = MappingType.ASYMMETRIC + dtype = torch.int8 + block_size = (1, 128) + quant_min = 0 + quant_max = 2**n_bit - 1 + eps = 1e-6 + scale_dtype = torch.bfloat16 + zero_point_dtype = torch.bfloat16 + scale, zero_point = \ + choose_qparams_affine( + input, + mapping_type, + block_size, + dtype, + quant_min, + quant_max, + eps, + 
scale_dtype=scale_dtype, + zero_point_dtype=zero_point_dtype, + preserve_zero=False, + ) + + def int_zero_point_to_float(zero_point, scale, quant_min, mid_point): + return (quant_min - zero_point + mid_point) * scale + + mid_point = 2 ** (n_bit - 1) + zero_point_float = int_zero_point_to_float(zero_point, scale, quant_min, mid_point) + + self.assertTrue(torch.equal(scale, scale_ref)) + torch.testing.assert_close(zero_point_float, zero_point_ref, rtol=0.00001, atol=torch.max(scale)*0.03) + + if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index b435d5a893..3975284b61 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -256,19 +256,29 @@ def choose_qparams_affine( eps: Optional[float] = None, scale_dtype: Optional[torch.dtype] = None, zero_point_dtype: Optional[torch.dtype] = None, + preserve_zero = True, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: input (torch.Tensor): fp32, bf16, fp16 input Tensor mapping_type (MappingType): determines how the qparams are calculated, symmetric or asymmetric - block_size: (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam - e.g. when size is the same as the input tensor dimension, we are using per tensor quantization + block_size: (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam + e.g. when size is the same as the input tensor dimension, we are using per tensor quantization target_dtype (torch.dtype): dtype for target quantized Tensor quant_min (Optional[int]): minimum quantized value for target quantized Tensor quant_max (Optional[int]): maximum quantized value for target quantized Tensor eps (Optional[float]): minimum scale, if not provided, default to eps of input.dtype scale_dtype (torch.dtype): dtype for scale Tensor zero_point_dtype (torch.dtype): dtype for zero_point Tensor + preserve_zero (bool): a flag to indicate whether we need zero to be exactly + representable or not; this is typically required for ops that need zero padding, like convolution, + and is less important for ops that don't have zero padding in the op itself, like linear. + + For example, given a floating point Tensor [1.2, 0.1, 3.0, 4.0, 0.4, 0], if `preserve_zero` is True, + we'll make sure there is an integer value corresponding to the floating point 0, e.g. [-3, -8, 3, 7, -7, -8], where 0 is mapped to `-8` without loss. But if `preserve_zero` is not True, there won't be such a + guarantee. 
+ + If we don't need zero to be exactly representable, we won't do rounding and clamping for zero_point Output: Tuple of scales and zero_points Tensor with requested dtype @@ -288,17 +298,27 @@ def choose_qparams_affine( min_val = torch.amin(input, dim=reduction_dims, keepdim=False) max_val = torch.amax(input, dim=reduction_dims, keepdim=False) - min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) - max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + if preserve_zero: + min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) + max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + else: + min_val_neg = min_val + max_val_pos = max_val if mapping_type == MappingType.SYMMETRIC: max_val_pos = torch.max(-min_val_neg, max_val_pos) scale = max_val_pos / (float(quant_max - quant_min) / 2) + if not preserve_zero: + raise ValueError("preserve_zero == False is not supported for symmetric quantization") zero_point = torch.full_like(scale, int((quant_min + quant_max + 1) / 2)) else: scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min) - zero_point = quant_min - torch.round(min_val_neg / scale) - zero_point = torch.clamp(zero_point, quant_min, quant_max) + if preserve_zero: + zero_point = quant_min - torch.round(min_val_neg / scale) + zero_point = torch.clamp(zero_point, quant_min, quant_max) + else: + zero_point = quant_min - min_val_neg / scale + if eps is None: eps = torch.finfo(input.dtype).eps From 921a1045ffd2fb5519304303d87cbd233e264aea Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Thu, 9 May 2024 09:16:40 -0700 Subject: [PATCH 26/61] Revert pyproject.toml changes (#232) * Revert pyproject.toml changes * Delete pyproject.toml --- .github/workflows/regression_test.yml | 2 +- pyproject.toml | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 pyproject.toml diff --git a/.github/workflows/regression_test.yml b/.github/workflows/regression_test.yml index a779f6cf0d..971515747e 100644 --- a/.github/workflows/regression_test.yml +++ b/.github/workflows/regression_test.yml @@ -67,5 +67,5 @@ jobs: pip install ${{ matrix.torch-spec }} pip install -r requirements.txt pip install -r dev-requirements.txt - python setup.py install + pip install . 
pytest test --verbose -s diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index edf5df1398..0000000000 --- a/pyproject.toml +++ /dev/null @@ -1,3 +0,0 @@ -[build-system] -requires = ["setuptools", "wheel", "ninja", "torch"] -build-backend = "setuptools.build_meta" From 0c5c814ed7a61b61bf932abc0944ba5af6389560 Mon Sep 17 00:00:00 2001 From: cpuhrsch Date: Thu, 9 May 2024 10:56:40 -0700 Subject: [PATCH 27/61] Bump version to 0.2.0 (#234) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3972cb2c76..7d4875cadb 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ def read_requirements(file_path): package_name = "torchao-nightly" if os.environ.get("TORCHAO_NIGHTLY") else "torchao" # Version is year.month.date if using nightlies -version = current_date if package_name == "torchao-nightly" else "0.1" +version = current_date if package_name == "torchao-nightly" else "0.2.0" import torch From ad12663c736c3ca5960fbb3a9b8f7df2d4d65b3a Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Thu, 9 May 2024 14:20:09 -0700 Subject: [PATCH 28/61] Skip depending on torch testing package (#235) Summary: We used to do ``` from torch.testing._internal.common_utils import IS_FBCODE ``` but this caused some internal failures, so we'll just copy paste the code for `IS_FBCODE` so that we don't depend on the testing package Test Plan: internal CI Reviewers: Subscribers: Tasks: Tags: --- torchao/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/torchao/__init__.py b/torchao/__init__.py index 340bfe3013..c982e09a0c 100644 --- a/torchao/__init__.py +++ b/torchao/__init__.py @@ -5,8 +5,12 @@ ) from . import dtypes import torch -from torch.testing._internal.common_utils import IS_FBCODE -if not IS_FBCODE: +_IS_FBCODE = ( + hasattr(torch._utils_internal, "IS_FBSOURCE") and + torch._utils_internal.IS_FBSOURCE +) + +if not _IS_FBCODE: from . import _C from . 
import ops From 7734f7928587442ad42caaeca69aea28f3ae4012 Mon Sep 17 00:00:00 2001 From: Thien Tran Date: Tue, 14 May 2024 23:52:48 +0800 Subject: [PATCH 29/61] Add FP16Act-FP6Weight Linear (#223) * add files from fp6_llm * try to port weight packing first * rename * rename fp6 weight packing * add fp16act_fp6weight_linear * fix function def * delete duplicate file * move weight quant file * rename * add pytorch interface for fp6 weight dequant * add fake_fp6 to fp6 * move weight_quant to csrc/cuda due to cuda_fp16.h dependency * add fake_fp6_to_fp6 test * add test for fp16act_fp6weight_linear * add test for fp6_weight_dequant * Fp6WeightOnlyQuantizedLinearWeight (not working yet) * skip some tests, since the functions are not built w/o CUDA * add the original test * implement transpose and clone so that F.linear will work * remove print * remove dequantize * add notes and some rename * typo * small cleanup * improve tensor subclass and add test (which is failing for torch-compile) * add note * add note * add qtorch as dev requirement * update error message * add __repr__ and fix transposed issue * add fp6 perplexity test * rename variables * remove subclass * add correctness test * remove unwanted changes * add apache 2.0 notice * add benchmark script * add note about FP6 kernel * relax tolerance --------- Co-authored-by: Mark Saroufim --- benchmarks/benchmark_fp6.py | 82 +++++++ setup.py | 4 +- test/test_ops.py | 93 ++++++++ torchao/csrc/cuda/fp6_llm/configs.h | 90 +++++++ torchao/csrc/cuda/fp6_llm/fp6_linear.cu | 184 +++++++++++++++ torchao/csrc/cuda/fp6_llm/kernel_matmul.cuh | 188 +++++++++++++++ .../csrc/cuda/fp6_llm/kernel_reduction.cuh | 63 +++++ torchao/csrc/cuda/fp6_llm/ptx_cp.async.cuh | 75 ++++++ torchao/csrc/cuda/fp6_llm/ptx_mma.cuh | 129 ++++++++++ torchao/csrc/cuda/fp6_llm/utils_core.cuh | 216 +++++++++++++++++ torchao/csrc/cuda/fp6_llm/utils_gmem.cuh | 91 ++++++++ .../cuda/fp6_llm/utils_parallel_dequant.cuh | 127 ++++++++++ torchao/csrc/cuda/fp6_llm/weight_quant.cu | 219 +++++++++++++++++ torchao/csrc/fp6_llm/README.md | 7 + torchao/csrc/fp6_llm/fp6_llm.cpp | 11 + torchao/csrc/fp6_llm/weight_prepacking.cpp | 220 ++++++++++++++++++ torchao/ops.py | 85 +++++++ 17 files changed, 1882 insertions(+), 2 deletions(-) create mode 100644 benchmarks/benchmark_fp6.py create mode 100644 torchao/csrc/cuda/fp6_llm/configs.h create mode 100644 torchao/csrc/cuda/fp6_llm/fp6_linear.cu create mode 100644 torchao/csrc/cuda/fp6_llm/kernel_matmul.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/kernel_reduction.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/ptx_cp.async.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/ptx_mma.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/utils_core.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/utils_gmem.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/utils_parallel_dequant.cuh create mode 100644 torchao/csrc/cuda/fp6_llm/weight_quant.cu create mode 100644 torchao/csrc/fp6_llm/README.md create mode 100644 torchao/csrc/fp6_llm/fp6_llm.cpp create mode 100644 torchao/csrc/fp6_llm/weight_prepacking.cpp diff --git a/benchmarks/benchmark_fp6.py b/benchmarks/benchmark_fp6.py new file mode 100644 index 0000000000..abe21d2f7d --- /dev/null +++ b/benchmarks/benchmark_fp6.py @@ -0,0 +1,82 @@ +import torch +import torchao +from torch.utils.benchmark import Timer +import pandas as pd +from tqdm import tqdm + + +def benchmark(m, k, n, splitK): + # Randomly initialize each bytes. The highest value for randint() is set the the max value of uint32_t. 
+ fp6_weight = torch.randint(4294967295, (n, k // 16 * 3)).to(torch.int) + fp16_scale = torch.rand(n).half() + 0.5 + fp16_activation = torch.rand(m, k).half() + 0.5 + + fp6_weight_packed = torchao.ops.prepack_fp6_weight(fp6_weight) + act_cuda = fp16_activation.cuda() + weight_cuda = fp6_weight_packed.cuda() + scale_cuda = fp16_scale.cuda() + + # need to do this since Timer cannot see torchao + def fp6_linear(act_cuda, weight_cuda, scale_cuda, splitK): + return torchao.ops.fp16act_fp6weight_linear(act_cuda, weight_cuda, scale_cuda, splitK) + + fp6_output = fp6_linear(act_cuda, weight_cuda, scale_cuda, splitK) + + fp6_measurement = Timer( + stmt="fp6_linear(act_cuda, weight_cuda, scale_cuda, splitK)", + globals=locals(), + ).blocked_autorange() + + fp16_weight = torchao.ops.fp6_weight_dequant(fp6_weight, fp16_scale).cuda() + fp16_output = act_cuda @ fp16_weight.T + + fp16_measurement = Timer( + stmt="act_cuda @ fp16_weight.T", + globals=locals(), + ).blocked_autorange() + + # follow https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/tests/python/kernel_test.py + # doesn't seem to be the right way to check for correctness + correct = (fp6_output - fp16_output).abs().mean() / fp16_output.abs().mean() < 1e-3 + + return { + "m": m, + "k": k, + "n": n, + "fp6_latency (ms)": fp6_measurement.median * 1000, + "fp16_latency (ms)": fp16_measurement.median * 1000, + "speedup (d/s)": fp16_measurement.median / fp6_measurement.median, + "correct": correct, + } + + +if __name__ == "__main__": + # from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/tests/python/run.sh + k_vals = (8192, 8192, 8192, 28672) + n_vals = (10240, 8192, 57344, 8192) + + results = [] + + # splitK can be tuned based on m, k, n + for m, splitK_vals in tqdm([ + (1, (5, 6, 7, 6)), + (2, (5, 6, 7, 6)), + (4, (5, 6, 7, 6)), + (8, (5, 6, 7, 6)), + # (16, (5, 6, 7, 6)), + # (64, (5, 6, 7, 6)), + # (128, (5, 3, 3, 3)), + # (256, (4, 3, 2, 3)), + # (512, (2, 5, 2, 4)), + (1024, (1, 2, 1, 2)), + (2048, (1, 1, 1, 1)), + (4096, (1, 1, 1, 1)), + # (8192, (1, 1, 1, 1)), + # (16384, (1, 1, 1, 1)), + ]): + for n, k, splitK in zip(n_vals, k_vals, splitK_vals): + results.append(benchmark(m, n, k, splitK)) + + df = pd.DataFrame(results) + df.to_csv("fp6_benchmark_results.csv", index=False) + print(df.to_markdown(index=False)) diff --git a/setup.py b/setup.py index 7d4875cadb..5d1f32da2b 100644 --- a/setup.py +++ b/setup.py @@ -63,10 +63,10 @@ def get_extensions(): this_dir = os.path.dirname(os.path.curdir) extensions_dir = os.path.join(this_dir, "torchao", "csrc") - sources = list(glob.glob(os.path.join(extensions_dir, "*.cpp"))) + sources = list(glob.glob(os.path.join(extensions_dir, "**/*.cpp"), recursive=True)) extensions_cuda_dir = os.path.join(extensions_dir, "cuda") - cuda_sources = list(glob.glob(os.path.join(extensions_cuda_dir, "*.cu"))) + cuda_sources = list(glob.glob(os.path.join(extensions_cuda_dir, "**/*.cu"), recursive=True)) if use_cuda: sources += cuda_sources diff --git a/test/test_ops.py b/test/test_ops.py index a569f24799..e260e86f0f 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -4,6 +4,7 @@ import torchao from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 import unittest +from parameterized import parameterized # torch.testing._internal.optests.generate_tests.OpCheckError: opcheck(op, ...): @@ -42,6 +43,98 @@ def test_nms(self): test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"] 
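The FP6 tests added below all exercise the same three ops end to end; a rough sketch of that flow, with illustrative shapes mirroring test_fp6_matmul_correctness:

    import torch
    import torchao

    BS, OC, IC, splitK = 2, 256, 256, 1

    # each group of 16 FP6 values packs into three 32-bit words, hence IC // 16 * 3
    fp6_weight = torch.randint(4294967295, (OC, IC // 16 * 3)).to(torch.int)
    fp16_scale = torch.rand(OC).half() + 0.5
    fp16_act = torch.rand(BS, IC).half() + 0.5

    packed = torchao.ops.prepack_fp6_weight(fp6_weight)  # CPU-side layout prepack
    out_fp6 = torchao.ops.fp16act_fp6weight_linear(
        fp16_act.cuda(), packed.cuda(), fp16_scale.cuda(), splitK)

    # reference path: dequantize to fp16 and run a plain matmul
    fp16_weight = torchao.ops.fp6_weight_dequant(fp6_weight, fp16_scale).cuda()
    ref = fp16_act.cuda() @ fp16_weight.T
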
+
+    def _create_fp6_inputs(self, BS: int, OC: int, IC: int):
+        # Randomly initialize each byte. The highest value for randint() is set to the max value of uint32_t.
+        fp6_weight = torch.randint(4294967295, (OC, IC // 16 * 3)).to(torch.int)
+        fp16_scale = torch.rand(OC).half() + 0.5
+        fp16_activation = torch.rand(BS, IC).half() + 0.5
+        return fp6_weight, fp16_scale, fp16_activation
+
+    def test_prepack_fp6_weight(self):
+        OC = 256
+        IC = 256
+        fp6_weight, _, _ = self._create_fp6_inputs(0, OC, IC)
+
+        # smoke test
+        torchao.ops.prepack_fp6_weight(fp6_weight)
+
+        # comprehensive testing
+        test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"]
+        opcheck(torch.ops.torchao.prepack_fp6_weight, (fp6_weight,), test_utils=test_utils)
+
+    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
+    def test_fp16_to_fp6(self):
+        OC = 256
+        IC = 256
+
+        # this FP6 format uses 3 bits for the exponent and 2 bits for the mantissa;
+        # it also has no nan/inf encodings
+        fp6_absmax = 28.0  # 2 ** (0b111 - 0b011) * (1 + 0.5 + 0.25), where E=111, M=11
+        fp6_absmin = 0.0625  # 2 ** (-0b010) * 0.25, where E=000, M=01 (subnormal number)
+        fp16_weight = torch.randn((OC, IC), dtype=torch.float16)
+        fp16_weight.clip_(-fp6_absmax, fp6_absmax)
+        fp16_weight[fp16_weight.abs() < fp6_absmin] = 0
+
+        # smoke test
+        torchao.ops.fp16_to_fp6(fp16_weight)
+
+        # comprehensive testing
+        test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"]
+        opcheck(torch.ops.torchao.fp16_to_fp6, (fp16_weight,), test_utils=test_utils)
+
+    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
+    def test_fp16act_fp6weight_linear(self):
+        BS = 2
+        OC = 256
+        IC = 256
+        splitK = 1
+        fp6_weight, fp16_scale, fp16_activation = self._create_fp6_inputs(BS, OC, IC)
+
+        fp6_weight_packed = torchao.ops.prepack_fp6_weight(fp6_weight)
+        act_cuda = fp16_activation.cuda()
+        weight_cuda = fp6_weight_packed.cuda()
+        scale_cuda = fp16_scale.cuda()
+
+        # smoke test
+        torchao.ops.fp16act_fp6weight_linear(act_cuda, weight_cuda, scale_cuda, splitK)
+
+        # comprehensive testing
+        test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"]
+        opcheck(torch.ops.torchao.fp16act_fp6weight_linear, (act_cuda, weight_cuda, scale_cuda, splitK), test_utils=test_utils)
+
+    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
+    def test_fp6_weight_dequant(self):
+        OC = 256
+        IC = 256
+        fp6_weight, fp16_scale, _ = self._create_fp6_inputs(0, OC, IC)
+
+        # smoke test
+        torchao.ops.fp6_weight_dequant(fp6_weight, fp16_scale)
+
+        # comprehensive testing
+        test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"]
+        opcheck(torch.ops.torchao.fp6_weight_dequant, (fp6_weight, fp16_scale), test_utils=test_utils)
+
+    # adapted from https://github.com/usyd-fsalab/fp6_llm/blob/main/tests/python/kernel_test.py
+    @parameterized.expand([(1, 2048, 4096, 5), (2, 8192, 8192, 6)])
+    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
+    def test_fp6_matmul_correctness(self, BS, OC, IC, splitK):
+        fp6_weight, fp16_scale, fp16_activation = self._create_fp6_inputs(BS, OC, IC)
+
+        fp6_weight_packed = torchao.ops.prepack_fp6_weight(fp6_weight)
+        act_cuda = fp16_activation.cuda()
+        weight_cuda = fp6_weight_packed.cuda()
+        scale_cuda = fp16_scale.cuda()
+
+        results_fp6 =
torchao.ops.fp16act_fp6weight_linear(act_cuda, weight_cuda, scale_cuda, splitK) + + fp16_weight = torchao.ops.fp6_weight_dequant(fp6_weight, fp16_scale).cuda() + results_fp16 = act_cuda @ fp16_weight.T + + error = (results_fp6 - results_fp16).abs() + relative_error = error / results_fp16.abs() + assert relative_error.mean() < 1e-2 + if __name__ == "__main__": unittest.main() diff --git a/torchao/csrc/cuda/fp6_llm/configs.h b/torchao/csrc/cuda/fp6_llm/configs.h new file mode 100644 index 0000000000..0a642fc805 --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/configs.h @@ -0,0 +1,90 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/configs.h + +#ifndef CONFIGS_H +#define CONFIGS_H + +//#define DEBUG_MODE +#define PIPELINE_LEVEL_GMEM 2 +#define PIPELINE_LEVEL_SMEM 2 // only support 2 + +/************************ Hardware Parameters ************************/ +#define WARP_SIZE 32 +#define REG_BIT_WIDTH 32 +// mma: M=16 K=16 N=8 +#define MMA_8 8 +#define MMA_16 16 +// for memory access +#define THREAD_OPT_ACCESS_BIT_WIDTH_128 128 // LDS.128, cp_async.128, ... +#define BIT_WIDTH_PER_HALF 16 // Half precision: FP16 + +/******************** Register Allocation For GEMM ********************/ +#define REG_PER_THREAD_C_TENSOR_16_16 8 // 8 for FP32 Accumulation +/********************** Memory Padding Parameters **********************/ +// Eliminating bank-conflict +#define PADDING_BYTES_16 16 // Padding 16 bytes each column +#define PADDING_SHARED_MEM_FOR_B_8 8 // Padding 8 half each column, during CopyFromGlobalToShared() for B +#define PADDING_SHARED_MEM_FOR_C_4 4 // Padding 4 float each column, during StoreToSharedMemoryFromRegister() for C +/************************* WARP Tiling part-1 *************************/ +#define WARP_ROW_MMA_TENSORS 4 +#define WARP_M (WARP_ROW_MMA_TENSORS * MMA_16) // 64 +#define WARP_K_MMA_TENSORS 4 +#define WARP_K (WARP_K_MMA_TENSORS * MMA_16) // 64 +template +struct TilingConfig { + // Depending on "n" dimension of the GEMM + static constexpr int BLOCK_ROW_WARPS = BLOCK_ROW_WARPS_; + static constexpr int BLOCK_COL_WARPS = BLOCK_COL_WARPS_; + static constexpr int WARP_COL_MMA_TENSORS = WARP_COL_MMA_TENSORS_; + /************************* WARP Tiling part-2 *************************/ + static constexpr int WARP_N = WARP_COL_MMA_TENSORS * MMA_8; + /*************************Thread Block Tiling *************************/ + static constexpr int TILE_M = WARP_M * BLOCK_ROW_WARPS; + static constexpr int TILE_N = MMA_8 * WARP_COL_MMA_TENSORS * BLOCK_COL_WARPS; + static constexpr int TILE_K = WARP_K; + /********************** #Thread per Thread Block **********************/ + static constexpr int BLOCK_WARPS = BLOCK_ROW_WARPS * BLOCK_COL_WARPS; + static constexpr int BLOCK_THREADS = BLOCK_WARPS * WARP_SIZE; + /******************************* Others *******************************/ + static constexpr int 
SMEM_SIZE_B_TILE = TILE_N * (TILE_K + PADDING_BYTES_16) * 2 * PIPELINE_LEVEL_GMEM;  // sizeof(half)=2, doubleBuffer=2
+    static constexpr int SMEM_SIZE_C_TILE = TILE_N * (TILE_M + PADDING_BYTES_16) * 4;  // sizeof(float)=4
+};
+
+/************************ General Config for FP6-LLM **********************/
+#define WEIGHT_FRAG1_BIT_WIDTH 2
+#define WEIGHT_FRAG2_BIT_WIDTH 4
+#define WEIGHT_BIT_WIDTH (WEIGHT_FRAG1_BIT_WIDTH+WEIGHT_FRAG2_BIT_WIDTH) // 6
+//#define QUANT_GROUP_SIZE_DIVIDED_BY_64 4 // QuantGroupSize: 4*64 = 256
+/*************************** 64*64 Weights of A WARP *************************/
+#define WEIGHT_PER_UNIT (WARP_M*WARP_K) // 64*64
+#define SMEM_SIZE_IN_BYTES_PER_WARP_A1 (WEIGHT_PER_UNIT*WEIGHT_FRAG1_BIT_WIDTH/8) // 1024 Bytes; double buffering not taken into consideration
+#define SMEM_SIZE_IN_BYTES_PER_WARP_A2 (WEIGHT_PER_UNIT*WEIGHT_FRAG2_BIT_WIDTH/8) // 2048 Bytes; double buffering not taken into consideration
+#define SMEM_SIZE_A1_TILE (SMEM_SIZE_IN_BYTES_PER_WARP_A1*4*PIPELINE_LEVEL_GMEM) // #WARP=4; triple buffer for a 3-level pipeline for A = 12 KB; double buffer for a 2-level pipeline for A = 8 KB.
+#define SMEM_SIZE_A2_TILE (SMEM_SIZE_IN_BYTES_PER_WARP_A2*4*PIPELINE_LEVEL_GMEM) // #WARP=4; triple buffer for a 3-level pipeline for A = 24 KB; double buffer for a 2-level pipeline for A = 16 KB.
+/******************** Global Memory Layout For QUANTIZED DATA ******************/
+#define NUM_INT4_PER_UNIT_2BIT_FRAG (WEIGHT_PER_UNIT*WEIGHT_FRAG1_BIT_WIDTH/128) // 64
+#define NUM_INT4_PER_UNIT_4BIT_FRAG (WEIGHT_PER_UNIT*WEIGHT_FRAG2_BIT_WIDTH/128) // 128
+/******************** Register Allocation For QUANTIZED DATA ******************/
+#define WEIGHT_PER_THREAD (WEIGHT_PER_UNIT/WARP_SIZE) // 128
+#define REG_PER_THREAD_2BIT_FRAG (WEIGHT_PER_THREAD/REG_BIT_WIDTH*2) // 8
+#define REG_PER_THREAD_4BIT_FRAG (WEIGHT_PER_THREAD/REG_BIT_WIDTH*4) // 16
+/******************** Register Allocation For QUANT Scales ******************/
+#define WARP_REG_QUANT_SCALE 4 // 8 rows per thread -> 8 FP16 scales -> 4 registers
+#define WARP_REG_QUANT_SCALE_DISTRIBUTED 1 // T0-T3, T4-T7, ..., T28-T31 share the same scales, using shfl to get all the scales for each thread
+
+
+
+#endif // CONFIGS_H
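As a cross-check of the tile arithmetic in TilingConfig above, the same math in plain Python (an illustrative sketch; the (4, 1, 8) instantiation is just an example, not necessarily one the launcher uses):

    # constants from configs.h
    WARP_SIZE, MMA_8, MMA_16 = 32, 8, 16
    WARP_ROW_MMA_TENSORS = WARP_K_MMA_TENSORS = 4
    WARP_M = WARP_ROW_MMA_TENSORS * MMA_16  # 64
    WARP_K = WARP_K_MMA_TENSORS * MMA_16    # 64

    def tile_shape(block_row_warps, block_col_warps, warp_col_mma_tensors):
        tile_m = WARP_M * block_row_warps
        tile_n = MMA_8 * warp_col_mma_tensors * block_col_warps
        tile_k = WARP_K
        threads = block_row_warps * block_col_warps * WARP_SIZE
        return tile_m, tile_n, tile_k, threads

    # e.g. a 4x1 warp grid with 8 column MMA tensors per warp:
    assert tile_shape(4, 1, 8) == (256, 64, 64, 128)

diff --git a/torchao/csrc/cuda/fp6_llm/fp6_linear.cu b/torchao/csrc/cuda/fp6_llm/fp6_linear.cu
new file mode 100644
index 0000000000..51413a0874
--- /dev/null
+++ b/torchao/csrc/cuda/fp6_llm/fp6_linear.cu
@@ -0,0 +1,184 @@
+// Copyright 2024 FP6-LLM authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.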
+// +// This file is adapted from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/fp6_linear.cu + +#include "kernel_matmul.cuh" +#include "kernel_reduction.cuh" + +#include +#include + +template +static void Kernel_Ex(cudaStream_t stream, + const uint4 *Weight, + const half *Scales, + const half *B, + OutputDataType *C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + int Split_K) +{ + #ifdef DEBUG_MODE + printf("\n"); + printf("Launcher.cu->Kernel_Ex():\n"); + printf("M: %d, N: %d, K: %d, SplitK: %d\n", M_Global, N_Global, K_Global, Split_K); + printf("TILE_M: %d, TILE_K: %d, TILE_N: %d\n", TilingConfig::TILE_M, TilingConfig::TILE_K, TilingConfig::TILE_N); + #endif + static size_t SHMEM_SZ = max(TilingConfig::SMEM_SIZE_B_TILE+SMEM_SIZE_A1_TILE+SMEM_SIZE_A2_TILE, TilingConfig::SMEM_SIZE_C_TILE); + cudaFuncSetAttribute(QUANT_GEMM_Kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ); + size_t dimN = (N_Global-1) / TilingConfig::TILE_N + 1; + size_t dimM = M_Global * Split_K / TilingConfig::TILE_M; + dim3 GridDim(dimN, dimM, 1); + dim3 BlockDim(WARP_SIZE * TilingConfig::BLOCK_WARPS, 1, 1); + // + #ifdef DEBUG_MODE + printf("GridDim.x: %d, GridDim.y: %d, GridDim.z: %d, BlockDim.x: %d, BlockDim.y: %d, BlockDim.z: %d SHMEM_SZ: %d\n", + GridDim.x, GridDim.y, GridDim.z, BlockDim.x, BlockDim.y, BlockDim.z, SHMEM_SZ); + printf("\n"); + #endif + QUANT_GEMM_Kernel<<>> + (Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); +} + +/* + * + */ +cudaError_t fp6_linear_kernel(cudaStream_t stream, + const uint4 *Weight, + const half *Scales, + const half *B, + half *C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + float *Reduction_Workspace, // Reduction_Workspace_Size = Split_K * M_Global * N_Global * sizeof(fp32) + int Split_K) +{ + assert(M_Global % 256 == 0); + assert(K_Global % 64 == 0); + assert(N_Global>0); + + // Work around to support more N shapes: + size_t N_PowerOf2; + if(N_Global>0 && N_Global<=8) N_PowerOf2 = 8; + if(N_Global>8 && N_Global<=16) N_PowerOf2 = 16; + if(N_Global>16 && N_Global<=32) N_PowerOf2 = 32; + if(N_Global>32 && N_Global<=64) N_PowerOf2 = 64; + if(N_Global>64 && N_Global<=128) N_PowerOf2 = 128; + if(N_Global>128) N_PowerOf2 = ((N_Global-1)/128+1) * 128; + + if (Split_K == 1) { + switch (N_PowerOf2) { + case 8: Kernel_Ex, half>(stream, Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); break; + case 16: Kernel_Ex, half>(stream, Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); break; + case 32: Kernel_Ex, half>(stream, Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); break; + case 64: Kernel_Ex, half>(stream, Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); break; + case 128: Kernel_Ex, half>(stream, Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); break; + default: if (N_PowerOf2 % 128 != 0) { + printf("FP6LLM_API Error: Unsupported N dimension %d!\n", N_PowerOf2); + return cudaErrorUnknown; + } + Kernel_Ex, half>(stream, Weight, Scales, B, C, M_Global, N_Global, K_Global, Split_K); break; + } + } + else { + switch (N_PowerOf2) { + case 8: Kernel_Ex, float>(stream, Weight, Scales, B, Reduction_Workspace, M_Global, N_Global, K_Global, Split_K); break; + case 16: Kernel_Ex, float>(stream, Weight, Scales, B, Reduction_Workspace, M_Global, N_Global, K_Global, Split_K); break; + case 32: Kernel_Ex, float>(stream, Weight, Scales, B, Reduction_Workspace, M_Global, N_Global, K_Global, Split_K); 
break;
+            case 64:  Kernel_Ex, float>(stream, Weight, Scales, B, Reduction_Workspace, M_Global, N_Global, K_Global, Split_K);  break;
+            case 128: Kernel_Ex, float>(stream, Weight, Scales, B, Reduction_Workspace, M_Global, N_Global, K_Global, Split_K);  break;
+            default:  if (N_PowerOf2 % 128 != 0) {
+                          printf("FP6LLM_API Error: Unsupported N dimension %d!\n", N_PowerOf2);
+                          return cudaErrorUnknown;
+                      }
+                      Kernel_Ex, float>(stream, Weight, Scales, B, Reduction_Workspace, M_Global, N_Global, K_Global, Split_K);  break;
+        }
+        // Reduction for SplitK
+        dim3 GridDim((M_Global * N_Global) / REDUCTION_ELEMENT_PER_THREADBLOCK, 1, 1);
+        dim3 BlockDim(WARP_SIZE, 1, 1);
+        SplitK_Reduction<<>>(C, Reduction_Workspace, M_Global, N_Global, Split_K);
+    }
+    return cudaGetLastError();
+}
+
+
+#include
+#include
+#include
+
+namespace torchao {
+/*
+Computes FP6-FP16 GEMM (PyTorch interface).
+
+[Mathematical Formula]
+Standard definition of linear layer: Out = In * trans(W), where In, Out, and W are stored in row-major.
+After equivalent transformation: trans(Out) = W * trans(In). Note that we do not perform a "transpose" at runtime;
+we instead interpret In/Out as column-major matrices when calling our CUDA kernel.
+
+[Inputs]
+  _in_feats: tensor of shape [B, IC];                 // half
+  _weights:  int tensor of shape [OC, IC // 16 * 3];  // 3 INT32 words contain 16 FP6 weights.
+  _scales:   tensor of shape [OC];                    // half
+  splitK:    splitting the MatMul problem along the K dimension for higher GPU utilization, default 1.
+[Outputs]
+  _out_feats: tensor of shape [B, OC];                // half
+*/
+torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats,
+                                      torch::Tensor _weights,
+                                      torch::Tensor _scales,
+                                      int64_t splitK=1)
+{
+    int num_in_feats = _in_feats.size(0);
+    int num_in_channels = _in_feats.size(1);
+    int num_out_channels = _weights.size(0);
+    assert( num_in_channels%64 == 0 );
+    assert( (num_in_channels/16*3) == _weights.size(1) ); // Making sure the K dimension is matched.
+    //
+    int M = num_out_channels;
+    int K = num_in_channels;
+    int N = num_in_feats;
+    // Input Tensors
+    auto weight = reinterpret_cast(_weights.data_ptr()); // weights is [OC, IC] but in FP6.
+    auto in_feats = reinterpret_cast(_in_feats.data_ptr());
+    auto scales = reinterpret_cast(_scales.data_ptr());
+    // Output Tensors
+    auto options = torch::TensorOptions().dtype(_in_feats.dtype()).device(_in_feats.device());
+    at::Tensor _out_feats = torch::empty({num_in_feats, num_out_channels}, options);
+    auto out_feats = reinterpret_cast(_out_feats.data_ptr());
+
+    options = torch::TensorOptions().dtype(torch::kFloat32).device(_in_feats.device());
+    at::Tensor _workspace = torch::empty({splitK, num_in_feats, num_out_channels}, options);
+    auto Reduction_Workspace = reinterpret_cast(_workspace.data_ptr());  // Reduction_Workspace_Size = Split_K * M_Global * N_Global * sizeof(fp32)
+
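The docstring's "equivalent transformation" can be verified in a few lines of PyTorch (an editorial sketch; shapes are arbitrary):

    import torch

    B, IC, OC = 4, 64, 32
    inp = torch.randn(B, IC)
    w = torch.randn(OC, IC)
    out = torch.nn.functional.linear(inp, w)  # Out = In @ W^T, shape [B, OC]
    # the kernel's view: M=OC, K=IC, N=B, computing trans(Out) = W @ trans(In)
    torch.testing.assert_close((w @ inp.T).T, out, rtol=1e-4, atol=1e-4)

+    fp6_linear_kernel(0, // Using default stream here.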
+ weight, + scales, + in_feats, + out_feats, + M, + N, + K, + Reduction_Workspace, + splitK); + + return _out_feats; +} + +TORCH_LIBRARY_IMPL(torchao, CUDA, m) { + m.impl("torchao::fp16act_fp6weight_linear", &fp6_linear_forward_cuda); +} + +} // namespace torchao diff --git a/torchao/csrc/cuda/fp6_llm/kernel_matmul.cuh b/torchao/csrc/cuda/fp6_llm/kernel_matmul.cuh new file mode 100644 index 0000000000..de7775ddce --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/kernel_matmul.cuh @@ -0,0 +1,188 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/kernel_matmul.cuh + +#include "configs.h" +#include "utils_gmem.cuh" +#include "utils_core.cuh" + +/* + * C = A*B + * A: row major with ahead-of-time layout transformation, FP6 + * B: col major, FP16 + * C: col major, FP16 + */ + template +__global__ void QUANT_GEMM_Kernel(const uint4* Weight, const half* Scales, + const half *B, + OutputDataType* C, + const size_t M_Global, const size_t N_Global, const size_t K_Global, + int Split_K) +{ + #ifdef DEBUG_MODE + assert(K_Global%TilingConfig::TILE_K==0); + assert(M_Global%TilingConfig::TILE_M==0); + assert( gridDim.y == Split_K * (M_Global/TilingConfig::TILE_M)); + #endif + // 2+4 weight split + const uint4* Weight1 = Weight; + const uint4* Weight2 = Weight1 + M_Global*K_Global*2/128; + // Dynamic shared memory for FP16 A tiles, 128 Bytes aligned + extern __shared__ __align__(128) half smem[]; + half (*smem_array)[WARP_K+PADDING_SHARED_MEM_FOR_B_8] = reinterpret_cast ( smem + (SMEM_SIZE_A1_TILE+SMEM_SIZE_A2_TILE)/2 ); // Dynamic shared memory for FP16 B tiles + __shared__ half QuantScales[64*TilingConfig::BLOCK_WARPS]; // static shared memory for quantization scales, 64 row per warp * 4 warps = 512 Bytes + // Thread Block Mapping, considering SplitK + const size_t BatchID = blockIdx.y / (M_Global/TilingConfig::TILE_M); + const size_t x = blockIdx.x; // Output Block ID: (BlockID_Row = y; BlockID_Col = x ) + const size_t y = blockIdx.y % (M_Global/TilingConfig::TILE_M); // Output Block ID: (BlockID_Row = y; BlockID_Col = x ) + const size_t Tile_Start_M = y * TilingConfig::TILE_M; + const size_t Tile_Start_N = x * TilingConfig::TILE_N; + const size_t NumColumnToCopy = (N_Global-Tile_Start_N) < TilingConfig::TILE_N ? 
(N_Global-Tile_Start_N) : TilingConfig::TILE_N; + const size_t NumBlock_K = K_Global/TilingConfig::TILE_K; + const size_t AverageNumBlock_K = NumBlock_K/Split_K; + const size_t ExtraNumBlock_K = NumBlock_K - AverageNumBlock_K * Split_K; + size_t NumIter = AverageNumBlock_K; + if(BatchID(smem); + uint32_t* AFrag_4BIT_SPTR = AFrag_2BIT_SPTR+SMEM_SIZE_IN_BYTES_PER_WARP_A1/4*TilingConfig::BLOCK_WARPS*PIPELINE_LEVEL_GMEM; // 8 buffers including double buffers, 12 for trible buffers + // StartSPTR for each WARP + AFrag_2BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A1/4; + AFrag_4BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A2/4; + // Pre-fetch of A tile + for(int i=0; i(AFrag_2BIT_SPTR+i*SMEM_SIZE_IN_BYTES_PER_WARP_A1/4*4, WARP_StartGPTR_A1); + CopyFromGlobalToShared_A(AFrag_4BIT_SPTR+i*SMEM_SIZE_IN_BYTES_PER_WARP_A2/4*4, WARP_StartGPTR_A2); + WARP_StartGPTR_A1 += SMEM_SIZE_IN_BYTES_PER_WARP_A1/16; + WARP_StartGPTR_A2 += SMEM_SIZE_IN_BYTES_PER_WARP_A2/16; + } + // Global Memory Address for Matrix A (QuantScale) ///////////////////////////////////////////////////////////////////// + const half* TB_StartGPTR_A_Scale = Scales + (y*TilingConfig::BLOCK_ROW_WARPS) * 64; + const half* WARP_StartGPTR_A_Scales = TB_StartGPTR_A_Scale + WARP_i * 64; + CopyFromGlobalToShared_Scales(QuantScales+WARP_i*64, WARP_StartGPTR_A_Scales); + // Copying B tile from Global to Shared, considering SplitK ///////////////////////////////////////////////////////////// + const half *BTile_GPTR = B + Tile_Start_N * K_Global + StartBlockID_K * TilingConfig::TILE_K; + for(int i=0; i (smem_array+i*TilingConfig::TILE_N, BTile_GPTR, K_Global, NumColumnToCopy); + BTile_GPTR += TilingConfig::TILE_K; + } + // Register Allocation for A,B, and C, Initilazed to Zeros ///////////////////////////////////////////////////////////////////// + constexpr int NumRegSets_a = WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + constexpr int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS==1) ? 1 : TilingConfig::WARP_COL_MMA_TENSORS/2; // 1 set = 4 registers, containing a 16*16 MMA block +#ifdef PIPELINE_LEVEL_SMEM + uint32_t a [NumRegSets_a * PIPELINE_LEVEL_SMEM][4]; // double/Trible buffer is used // Registers to store decompressed FP6 + uint32_t b [NumRegSets_b * PIPELINE_LEVEL_SMEM][4]; // double/Triple buffer is used // Register to store FP16 B matrix (a slice) +#endif + float c[NumRegSets_a * NumRegSets_b][REG_PER_THREAD_C_TENSOR_16_16]; + for(int i=0; i(a, b, AFrag_2BIT_SPTR, AFrag_4BIT_SPTR, smem_array, Scales_RPTR); +#endif + // The outer loop. 
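For reference, the Split-K work division computed above (AverageNumBlock_K, ExtraNumBlock_K, NumIter, StartBlockID_K) can be restated in a few lines of Python (illustrative sketch of the same logic):

    # how each K-batch (BatchID) gets its starting K block and iteration count
    def k_partition(num_block_k: int, split_k: int, batch_id: int):
        avg, extra = divmod(num_block_k, split_k)
        num_iter = avg + (1 if batch_id < extra else 0)
        start = batch_id * avg + min(batch_id, extra)
        return start, num_iter

    # every K block is covered exactly once, e.g. 10 blocks over split_k=4:
    spans = [k_partition(10, 4, b) for b in range(4)]
    assert sum(n for _, n in spans) == 10
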
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + #pragma unroll(1) + for (size_t tile_id_k = 0; tile_id_k < NumIter; tile_id_k++) + { + // Trible-Buffer for A Tile + uint32_t* __restrict__ read_SPTR_Frag1 = AFrag_2BIT_SPTR + ((tile_id_k+0) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A1/4*4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + uint32_t* __restrict__ read_SPTR_Frag2 = AFrag_4BIT_SPTR + ((tile_id_k+0) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A2/4*4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 +#ifdef PIPELINE_LEVEL_SMEM + uint32_t* __restrict__ read2_SPTR_Frag1 = AFrag_2BIT_SPTR + ((tile_id_k+1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A1/4*4; + uint32_t* __restrict__ read2_SPTR_Frag2 = AFrag_4BIT_SPTR + ((tile_id_k+1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A2/4*4; +#endif + uint32_t* __restrict__ write_SPTR_Frag1 = AFrag_2BIT_SPTR + ((tile_id_k+(PIPELINE_LEVEL_GMEM-1)) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A1/4*4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + uint32_t* __restrict__ write_SPTR_Frag2 = AFrag_4BIT_SPTR + ((tile_id_k+(PIPELINE_LEVEL_GMEM-1)) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A2/4*4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + // Trible-Buffer for B Tile + half __restrict__ (*read_SPTR )[WARP_K+PADDING_SHARED_MEM_FOR_B_8] = smem_array + ((tile_id_k+0) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; +#ifdef PIPELINE_LEVEL_SMEM + half __restrict__ (*read2_SPTR )[WARP_K+PADDING_SHARED_MEM_FOR_B_8] = smem_array + ((tile_id_k+1) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; +#endif + half __restrict__ (*write_SPTR)[WARP_K+PADDING_SHARED_MEM_FOR_B_8] = smem_array + ((tile_id_k+(PIPELINE_LEVEL_GMEM-1)) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; + // + bool GlobalCopy = (tile_id_k+PIPELINE_LEVEL_GMEM-1) < NumIter; + // Copying A tile from Global to Register, Bypassing L1, using double-buffer + CopyFromGlobalToShared_A(write_SPTR_Frag1, WARP_StartGPTR_A1, GlobalCopy); + CopyFromGlobalToShared_A(write_SPTR_Frag2, WARP_StartGPTR_A2, GlobalCopy); + // copying B tile from GlobalMemory to SharedMemory + CopyFromGlobalToShared (write_SPTR, BTile_GPTR, K_Global, NumColumnToCopy, GlobalCopy); + cp_async_group_commit(); + #ifdef PIPELINE_LEVEL_SMEM + core_mma_slice(c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 1); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each WARP; read_SPTR is shared among WARPs + core_mma_slice(c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 2); + core_mma_slice(c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 3); + // Barriers and Synchronizations + cp_async_wait_group(); + __syncthreads(); + core_mma_slice(c, a, b, read2_SPTR_Frag1, read2_SPTR_Frag2, read2_SPTR, Scales_RPTR, 0); + // Updating global PTRs + WARP_StartGPTR_A1 += SMEM_SIZE_IN_BYTES_PER_WARP_A1/16; // 4KB/16=256 (1)/16: int4*+1 = char*+16 + WARP_StartGPTR_A2 += SMEM_SIZE_IN_BYTES_PER_WARP_A2/16; // 8KB/16=512 (1)/16: int4*+1 = char*+16 + BTile_GPTR += TilingConfig::TILE_K; + #else + PipelinedCoreLoop(c, read_SPTR, read_SPTR_Frag1, read_SPTR_Frag2, Scales_RPTR); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each WARP; read_SPTR is shared among WARPs + // Updating global PTRs + WARP_StartGPTR_A1 += SMEM_SIZE_IN_BYTES_PER_WARP_A1/16; // 4KB/16=256 (1)/16: int4*+1 = char*+16 + WARP_StartGPTR_A2 += SMEM_SIZE_IN_BYTES_PER_WARP_A2/16; // 8KB/16=512 
(1)/16: int4*+1 = char*+16 + BTile_GPTR += TilingConfig::TILE_K; + // Barriers and Synchronizations + cp_async_wait_group(); + __syncthreads(); + #endif + } + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Store the C fragments to shared memory. + float (*smem_CFrag) [TilingConfig::TILE_M+PADDING_SHARED_MEM_FOR_C_4] = + reinterpret_cast (smem); + StoreToSharedMemoryFromRegister(smem_CFrag, c); + __syncthreads(); + // Now that shared memory contains all the D tiles, stream them to global memory. + OutputDataType* BlockGlobalPTR = C + BatchID*(M_Global*N_Global) + Tile_Start_M + Tile_Start_N*M_Global; + for(size_t i=warpId; i::value) BlockGlobalPTR[j+i*M_Global] = __float2half_rn(smem_CFrag[i][j]); + else BlockGlobalPTR[j+i*M_Global] = smem_CFrag[i][j]; + } +} diff --git a/torchao/csrc/cuda/fp6_llm/kernel_reduction.cuh b/torchao/csrc/cuda/fp6_llm/kernel_reduction.cuh new file mode 100644 index 0000000000..c0e7c1918a --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/kernel_reduction.cuh @@ -0,0 +1,63 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/kernel_reduction.cuh + +/*************************************************************************** + * Copyright 2023 The FLash-LLM Authors. All rights reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ***************************************************************************/ +// Used for the reduction of result matrix if Split-K is used +// Reduction_Workspace: (Split_K, M_Global, N_Global), column major +// C: (M_Global, N_Global), column major +// Each thread deals with 8 output elements, each elements is the sum of Split_K elements +// Read Global: Each Warp/ThreadBlock: 32 threads_per_warp * 8 float_per_thread (256bit) -> 256 float per warp +// Write Global: Each Warp/ThreadBlock: 32 threads_per_warp * 8 half_per_thread (128bit) -> 256 half per warp +// GridSize = (M_Global*N_Global) / 256 + +#include +#include +#include + +#define REDUCTION_ELEMENT_PER_THREADBLOCK 256 +#define HALF_PER_128BIT 8 + +__global__ void SplitK_Reduction(half* C, float* Reduction_Workspace, size_t M_Global, size_t N_Global, int Split_K) +{ + half* WARP_GPTR_C = C + REDUCTION_ELEMENT_PER_THREADBLOCK * blockIdx.x; + float* WARP_GPTR_R = Reduction_Workspace + REDUCTION_ELEMENT_PER_THREADBLOCK * blockIdx.x; + half* THREAD_GPTR_C = WARP_GPTR_C + threadIdx.x * HALF_PER_128BIT; + float* THREAD_GPTR_R = WARP_GPTR_R + threadIdx.x * HALF_PER_128BIT; + // Initializing Thread-Local Results + float Results[HALF_PER_128BIT]; + #pragma unroll + for (int i = 0; i < HALF_PER_128BIT; i++) Results[i] = 0.0f; + // Reduction + for (int i = 0; i < Split_K; i++) { + #pragma unroll + for (int j = 0; j < HALF_PER_128BIT; j++) Results[j] += THREAD_GPTR_R[j]; + THREAD_GPTR_R += M_Global * N_Global; + } + // Writing to global memory + #pragma unroll + for (int i = 0; i < HALF_PER_128BIT; i++) THREAD_GPTR_C[i] = __float2half_rn(Results[i]); +} diff --git a/torchao/csrc/cuda/fp6_llm/ptx_cp.async.cuh b/torchao/csrc/cuda/fp6_llm/ptx_cp.async.cuh new file mode 100644 index 0000000000..c1d064f32a --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/ptx_cp.async.cuh @@ -0,0 +1,75 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/ptx_cp.async.cuh + +/*************************************************************************** + * Copyright 2023 The FLash-LLM Authors. All rights reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ***************************************************************************/ +// Extended from CUTLASS's source code + +#ifndef PTX_CP_ASYNC_CUH +#define PTX_CP_ASYNC_CUH + +#include +#include +#include + +template +__device__ __forceinline__ void cp_async(half* smem_ptr, const half* global_ptr, bool pred_guard = true) +{ + static_assert(SizeInBytes == 16, "Size is not supported"); + unsigned smem_int_ptr = __cvta_generic_to_shared(smem_ptr); + asm volatile("{ \n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], %3;\n" + "}\n" ::"r"((int)pred_guard), + "r"(smem_int_ptr), + "l"(global_ptr), + "n"(SizeInBytes)); +} + +/// Establishes an ordering w.r.t previously issued cp.async instructions. Does not block. +__device__ __forceinline__ void cp_async_group_commit() +{ + asm volatile("cp.async.commit_group;\n" ::); +} + +/// Blocks until all but previous cp.async.commit_group operations have committed. +template +__device__ __forceinline__ void cp_async_wait_group() +{ + asm volatile("cp.async.wait_group %0;\n" ::"n"(N)); +} + +/// Blocks until all previous cp.async.commit_group operations have committed. +// cp.async.wait_all is equivalent to : +// cp.async.commit_group; +// cp.async.wait_group 0; +__device__ __forceinline__ void cp_async_wait_all() +{ + asm volatile("cp.async.wait_all;\n" ::); +} + +#endif diff --git a/torchao/csrc/cuda/fp6_llm/ptx_mma.cuh b/torchao/csrc/cuda/fp6_llm/ptx_mma.cuh new file mode 100644 index 0000000000..d0985bd63d --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/ptx_mma.cuh @@ -0,0 +1,129 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/ptx_mma.cuh + +/*************************************************************************** + * Copyright 2023 The FLash-LLM Authors. All rights reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ***************************************************************************/ +#ifndef PTX_MMA_CUH +#define PTX_MMA_CUH + +#include +#include +#include + +#include +#include "configs.h" + +#ifdef PIPELINE_LEVEL_SMEM +template +__device__ __forceinline__ void B_FromSharedToReg(uint32_t __restrict__ Reg[][4], + half __restrict__ (*read_SPTR)[WARP_K+PADDING_SHARED_MEM_FOR_B_8], + int slice_id) { + #ifdef DEBUG_MODE + static_assert( (TilingConfig::WARP_COL_MMA_TENSORS==1) || (TilingConfig::WARP_COL_MMA_TENSORS%2==0) ); + #endif + + const int warpId = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 * WARP_j; // each warp may start from reading warp_start_col'th column of the B tile in shared memory + #ifdef DEBUG_MODE + assert( warp_start_col==0 ); + #endif + + int col = (lane_id%8) + (lane_id/16)*8; + int row = (lane_id%16) / 8 * 8; + uint32_t smem_local_ptr = static_cast(__cvta_generic_to_shared(&read_SPTR[warp_start_col+col][slice_id*MMA_16 + row])); + if(TilingConfig::WARP_COL_MMA_TENSORS==1) { + asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" + : "=r"(Reg[0][0]), "=r"(Reg[0][1]) + : "r"(smem_local_ptr)); + } + else { + #pragma unroll + for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS/2; i++) + { + asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" + : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3]) + : "r"(smem_local_ptr)); + smem_local_ptr += 16 * (WARP_K+PADDING_SHARED_MEM_FOR_B_8) * sizeof(half); + } + } +} +#else +// Debug: Whether ldmatrix.trans is required??? +// B is in column-major +template +__device__ __forceinline__ void B_FromSharedToReg(uint32_t __restrict__ Reg[][4], + half __restrict__ (*read_SPTR)[WARP_K+PADDING_SHARED_MEM_FOR_B_8], + int k_offset) { + #ifdef DEBUG_MODE + static_assert( (TilingConfig::WARP_COL_MMA_TENSORS==1) || (TilingConfig::WARP_COL_MMA_TENSORS%2==0) ); + #endif + + const int warpId = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 * WARP_j; // each warp may start from reading warp_start_col'th column of the B tile in shared memory + #ifdef DEBUG_MODE + assert( warp_start_col==0 ); + #endif + + int col = (lane_id%8) + (lane_id/16)*8; + int row = (lane_id%16) / 8 * 8; + uint32_t smem_local_ptr = static_cast(__cvta_generic_to_shared(&read_SPTR[warp_start_col+col][k_offset + row])); + if(TilingConfig::WARP_COL_MMA_TENSORS==1) { + asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" + : "=r"(Reg[0][0]), "=r"(Reg[0][1]) + : "r"(smem_local_ptr)); + } + else { + #pragma unroll + for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS/2; i++) + { + asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" + : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3]) + : "r"(smem_local_ptr)); + smem_local_ptr += 16 * (WARP_K+PADDING_SHARED_MEM_FOR_B_8) * sizeof(half); + } + } +} +#endif + +__device__ __forceinline__ void +MMA_FP16_M16N8K16(uint32_t __restrict__ c[], uint32_t __restrict__ *a, uint32_t __restrict__ *b) +{ + asm volatile("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" + "{ %0, %1, %2, %3}," + "{ %4, %5, %6, %7 }," + "{ %8, %9 }," + "{ %10, %11, %12, %13 };" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(a[0]), "r"(a[1]), 
"r"(a[2]), "r"(a[3]), + "r"(b[0]), "r"(b[1]), + "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); +} + +#endif diff --git a/torchao/csrc/cuda/fp6_llm/utils_core.cuh b/torchao/csrc/cuda/fp6_llm/utils_core.cuh new file mode 100644 index 0000000000..5bfc043ef6 --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/utils_core.cuh @@ -0,0 +1,216 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/utils_core.cuh + +#ifndef UTILS_CORE_CUH +#define UTILS_CORE_CUH + +#include + +#include "configs.h" +#include "ptx_mma.cuh" +#include "utils_parallel_dequant.cuh" + + +#ifdef PIPELINE_LEVEL_SMEM +template +__device__ __forceinline__ void CopyFromSharedToRegister_AFrag(uint32_t Reg[], uint32_t* SPTR, int slice_id) { + SPTR += slice_id * (NUM_INT_PER_THREAD*WARP_SIZE); + int lane_id = threadIdx.x % WARP_SIZE; + #pragma unroll + for(int i=0; i +__device__ __forceinline__ void initialize_mma_slice(uint32_t (*a)[4], + uint32_t (*b)[4], + uint32_t* __restrict__ A1_SPTR_read, + uint32_t* __restrict__ A2_SPTR_read, + half __restrict__ (*B_SPTR_read)[WARP_K+PADDING_SHARED_MEM_FOR_B_8], + uint32_t* RPTR_Scales) +{ + // Writing registers + // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6 register per thread; + uint32_t a_1[2]; // NO double buffer + uint32_t a_2[4]; // NO double buffer + CopyFromSharedToRegister_AFrag<2> (a_1, A1_SPTR_read, 0); + CopyFromSharedToRegister_AFrag<4> (a_2, A2_SPTR_read, 0); + Dequant_32FP6_4Way(a, a_1, a_2, RPTR_Scales); // SIMT Dequant: dequantizing FP6 to FP16 at register level, dequantizing a slice each time + B_FromSharedToReg(b, B_SPTR_read, 0); // Loading B from shared to registers +} + +template +__device__ __forceinline__ void core_mma_slice(float c[][REG_PER_THREAD_C_TENSOR_16_16], + uint32_t (*a)[4], + uint32_t (*b)[4], + uint32_t* __restrict__ A1_SPTR_read, + uint32_t* __restrict__ A2_SPTR_read, + half __restrict__ (*B_SPTR_read)[WARP_K+PADDING_SHARED_MEM_FOR_B_8], + uint32_t* RPTR_Scales, + int slice_id) // writing slice[slice_id] to registers, k=0 -> slice_id=1 for prefetching +{ + #ifdef DEBUG_MODE + assert((TilingConfig::WARP_COL_MMA_TENSORS==1) || (TilingConfig::WARP_COL_MMA_TENSORS%2==0)); // if WARP_COL_MMA_TENSORS == 1, B tile in registers is padded to a 16*16 MMA block + #endif + const int NumRegSets_a = WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + const int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS==1) ? 
1 : TilingConfig::WARP_COL_MMA_TENSORS/2; // 1 set = 4 registers, containing a 16*16 MMA block + uint32_t (*c_uint_ptr)[REG_PER_THREAD_C_TENSOR_16_16] = reinterpret_cast(c); // Reigsters for accumulated FP32 results + + // Setting RPTRs for double buffers + uint32_t (*a_read )[4] = a; + uint32_t (*a_write)[4] = a; + uint32_t (*b_read )[4] = b; + uint32_t (*b_write)[4] = b; + if(slice_id%2==1) { b_write += NumRegSets_b; a_write += NumRegSets_a;} + else { b_read += NumRegSets_b; a_read += NumRegSets_a;} + + // Reading registers and issuing core tensor core computations (a slice of A and B tile in shared memory) + #pragma unroll + for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) { + if(TilingConfig::WARP_COL_MMA_TENSORS==1) { + MMA_FP16_M16N8K16( c_uint_ptr[i], a_read[i], b_read[0] ); + } + else { + #pragma unroll + for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS/2; j++) { + MMA_FP16_M16N8K16( c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS], a_read[i], b_read[j] ); + MMA_FP16_M16N8K16( c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS] + 4, a_read[i], b_read[j] + 2 ); // c+4; b+2 + } + } + } + + // Writing registers + // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6 register per thread; + uint32_t a_1[2]; // NO double buffer + uint32_t a_2[4]; // NO double buffer + CopyFromSharedToRegister_AFrag<2> (a_1, A1_SPTR_read, slice_id); + CopyFromSharedToRegister_AFrag<4> (a_2, A2_SPTR_read, slice_id); + Dequant_32FP6_4Way(a_write, a_1, a_2, RPTR_Scales); // SIMT Dequant: dequantizing FP6 to FP16 at register level, dequantizing a slice each time + B_FromSharedToReg (b_write, B_SPTR_read, slice_id); // Loading B from shared to registers +} + +#else +// Old version with naive pipeline design +template +__device__ __forceinline__ void CopyFromSharedToRegister_AFrag(uint32_t Reg[], uint32_t* SPTR) { + int lane_id = threadIdx.x % WARP_SIZE; + #pragma unroll + for(int i=0; i +__device__ __forceinline__ void PipelinedCoreLoop(float c[][REG_PER_THREAD_C_TENSOR_16_16], + half __restrict__ (*read_SPTR)[WARP_K+PADDING_SHARED_MEM_FOR_B_8], + uint32_t* __restrict__ read_SPTR_Frag1, + uint32_t* __restrict__ read_SPTR_Frag2, + uint32_t* RPTR_Scales) +{ + #ifdef DEBUG_MODE + assert((TilingConfig::WARP_COL_MMA_TENSORS==1) || (TilingConfig::WARP_COL_MMA_TENSORS%2==0)); // if WARP_COL_MMA_TENSORS == 1, B tile in registers is padded to a 16*16 MMA block + #endif + const int NumRegSets_a = WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + const int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS==1) ? 
1 : TilingConfig::WARP_COL_MMA_TENSORS/2; // 1 set = 4 registers, containing a 16*16 MMA block + + // Reigsters to store FP32 results + uint32_t (*c_uint_ptr)[REG_PER_THREAD_C_TENSOR_16_16] = reinterpret_cast(c); + // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6 register per thread; + uint32_t a_1[2*2]; // double buffer is used + uint32_t a_2[4*2]; // double buffer is used + // Registers to store decompressed FP6 + uint32_t a [NumRegSets_a * 1][4]; // No double buffer + // Register to store FP16 B matrix (a slice) + uint32_t b [NumRegSets_b * 2][4]; // double buffer is used + + // Overlapped Smem and TC pipeline: pre-loading from shared to registers + CopyFromSharedToRegister_AFrag<2> (a_1, read_SPTR_Frag1); + CopyFromSharedToRegister_AFrag<4> (a_2, read_SPTR_Frag2); + B_FromSharedToReg (b, read_SPTR, 0); + + #pragma unroll + for (int k = 0; k < WARP_K_MMA_TENSORS; k++) { + uint32_t (*b_read)[4] = b; + uint32_t (*b_write)[4] = b; + uint32_t *a_1_read = a_1; + uint32_t *a_1_write = a_1; + uint32_t *a_2_read = a_2; + uint32_t *a_2_write = a_2; + if(k%2==0) { + b_write += NumRegSets_b; + a_1_write += 2; + a_2_write += 4; + } + else { + b_read += NumRegSets_b; + a_1_read += 2; + a_2_read += 4; + } + // data loading + if (k + 1 < WARP_K_MMA_TENSORS) { + // updating SPTR for fragment1 and fragment2 + read_SPTR_Frag1 += 2*WARP_SIZE; + read_SPTR_Frag2 += 4*WARP_SIZE; + CopyFromSharedToRegister_AFrag<2>(a_1_write, read_SPTR_Frag1); + CopyFromSharedToRegister_AFrag<4>(a_2_write, read_SPTR_Frag2); + B_FromSharedToReg(b_write, read_SPTR, (k+1)*MMA_16); + } + // SIMT Dequant + Tensor Core computations + Dequant_32FP6_4Way(a, a_1_read, a_2_read, RPTR_Scales); // Dequantizing FP6 to FP16 at register level, dequantizing a slice each time + #pragma unroll + for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) { + if(TilingConfig::WARP_COL_MMA_TENSORS==1) + MMA_FP16_M16N8K16( c_uint_ptr[i], a[i], b_read[0] ); + else { + #pragma unroll + for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS/2; j++) { + MMA_FP16_M16N8K16( c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS], a[i], b_read[j] ); + MMA_FP16_M16N8K16( c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS] + 4, a[i], b_read[j] + 2 ); // c+4; b+2 + } + } + } + } +} +#endif // #ifdef PIPELINE_LEVEL_SMEM + +template +__device__ __forceinline__ void StoreToSharedMemoryFromRegister(float (*smem_CFrag)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4], + float c[][REG_PER_THREAD_C_TENSOR_16_16]) +{ + const int lane_id = threadIdx.x % WARP_SIZE; + const int warpId = threadIdx.x / WARP_SIZE; + int warp_row_offset = warpId * (MMA_16 * WARP_ROW_MMA_TENSORS); + #pragma unroll + for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) { + #pragma unroll + for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS; j++) { // Dealing with one 16*8 Tensor + int RegSetID = i + (j/2)*WARP_ROW_MMA_TENSORS; + int RegOffset = (j%2)*(REG_PER_THREAD_C_TENSOR_16_16/2); + int Tensor_row_offset = warp_row_offset + i * MMA_16; + int Tensor_col_offset = j * MMA_8; + #pragma unroll + for (int r = 0; r < REG_PER_THREAD_C_TENSOR_16_16/2; r++) { + int row_offset = lane_id / 4; + if (r >= 2) row_offset += 8; + int col_offset = (lane_id % 4) * 2; + if (r%2==1) col_offset += 1; + smem_CFrag[Tensor_col_offset + col_offset][Tensor_row_offset + row_offset] = c[RegSetID][r + RegOffset]; + } + } + } +} + +#endif diff --git a/torchao/csrc/cuda/fp6_llm/utils_gmem.cuh b/torchao/csrc/cuda/fp6_llm/utils_gmem.cuh new file mode 100644 index 0000000000..5c37452e13 --- /dev/null +++ 
b/torchao/csrc/cuda/fp6_llm/utils_gmem.cuh @@ -0,0 +1,91 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/utils_gmem.cuh + +#ifndef UTILS_GMEM_CUH +#define UTILS_GMEM_CUH + +#include +#include "configs.h" +#include "ptx_cp.async.cuh" + +/* + * Copying A1/A2 from global memory to shared memory. + * Usually 1024 or 2048 Bytes + */ +template +__device__ __forceinline__ void CopyFromGlobalToShared_A(uint32_t* SPTR, + const uint4* GPTR, + bool pred_guard = true) { + #ifdef DEBUG_MODE + static_assert(SMEM_SIZE_IN_BYTES_PER_WARP/WARP_SIZE % 16 == 0); + #endif + int lane_id = threadIdx.x % WARP_SIZE; + half* SPTR_HALF = reinterpret_cast(SPTR); + const half* GPTR_HALF = reinterpret_cast(GPTR); + SPTR_HALF += lane_id*8; + GPTR_HALF += lane_id*8; + #pragma unroll + for(int i=0; i( SPTR_HALF, GPTR_HALF, pred_guard); + SPTR_HALF += 256; // Forward 512 Bytes + GPTR_HALF += 256; // Forward 512 Bytes + } + +} + +/* + * Copying 64 Quant Scales (FP16) from global memory to shared memory. + */ +__device__ __forceinline__ void CopyFromGlobalToShared_Scales(half* SPTR_QuantScales, + const half* GPTR_A_Scales) { + int lane_id = threadIdx.x % WARP_SIZE; + int Offset_Shared = lane_id*2; + int Offset_Global = lane_id/4 + (lane_id%4)*16; + for(int i=0; i<2; i++) SPTR_QuantScales[Offset_Shared+i] = GPTR_A_Scales[Offset_Global+i*8]; +} + +/* + * (1) Copying X rows * 64 columns of FP16 values, originally in row major + * (2) Copying 64 rows * X columns of FP16 values, originally in column major + * 16 Bytes per thread -> 512 Bytes per WARP = 4 line per WARP = 1 line per 8 Threads + */ +template +__device__ __forceinline__ void CopyFromGlobalToShared(half __restrict__ (*SharedPTR)[WARP_K+PADDING_SHARED_MEM_FOR_B_8], + const half* GlobalPTR, + const int GlobalStride, + const int NumOfLinesLeft, // To support arbitrary N dimensions. 
+ bool Pred = true) { + // static parameters: 1 Group (8 Threads) can copy 1 line (64 FP16) each time + const int NumOfThreads = BLOCK_WARPS * WARP_SIZE; + const int NumOfGroups = NumOfThreads / 8; + const int MaxIteration = (MaxNumOfLinesToCopy-1) / NumOfGroups + 1; + // runtime variables + const int line_id = threadIdx.x / 8; + const int line_offset = (threadIdx.x%8) * 8; + // PTR for source global memory and target shared memory + GlobalPTR += line_id * GlobalStride + line_offset; + SharedPTR += line_id; + #pragma unroll + for (int i = 0; i < MaxIteration; i++) { + bool AsyncCopyPred = (line_id+i*NumOfGroups) < NumOfLinesLeft && Pred; + cp_async<16>( &(*SharedPTR)[line_offset], GlobalPTR, AsyncCopyPred); + // + GlobalPTR += NumOfGroups * GlobalStride; + SharedPTR += NumOfGroups; + } +} + +#endif diff --git a/torchao/csrc/cuda/fp6_llm/utils_parallel_dequant.cuh b/torchao/csrc/cuda/fp6_llm/utils_parallel_dequant.cuh new file mode 100644 index 0000000000..f6ce4cc046 --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/utils_parallel_dequant.cuh @@ -0,0 +1,127 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is copied from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/include/utils_parallel_dequant.cuh + +#ifndef UTILS_PARALLELDEQUANT_CUH +#define UTILS_PARALLELDEQUANT_CUH + +#include +#include +#include + +/* + * Input: R1 + * Outputs: R1, R2 + * Note: Simplified Exponent calculation is applied. + */ +__device__ __forceinline__ void FP6_FP16_Cast_4Way(u_int32_t *R1, u_int32_t *R2) { + *R2 = *R1 & 0x80808080; + *R1 = *R1 >> 2; + *R1 = *R1 & 0x1f1f1f1f; + *R2 = *R2 | *R1; + *R1 = *R2 & 0x9f009f00; + *R2 = *R2 & 0x009f009f; + *R2 = *R2 << 8; +} + +/* + * Input: R1 + * Outputs: R1, R2 + * Note: Simplified Exponent calculation is NOT applied. 
+ */ +__device__ __forceinline__ void FP6_FP16_Cast_4Way_Naive(u_int32_t *R1, u_int32_t *R2) { + //*R2 = *R1 & 0x80808080; + *R2 = *R1 & 0xc0c0c0c0; + *R1 = *R1 >> 2; + //*R1 = *R1 & 0x1f1f1f1f; + *R1 = *R1 & 0x0f0f0f0f; + *R2 = *R2 | *R1; + // + //*R1 = *R2 & 0x9f009f00; + //*R2 = *R2 & 0x009f009f; + *R1 = *R2 & 0xcf00cf00; + if( !(*R1 & 0x40000000) && (*R1 & 0x0c000000) ) *R1 = *R1 | 0x30000000; + if( !(*R1 & 0x00004000) && (*R1 & 0x00000c00) ) *R1 = *R1 | 0x00003000; + *R2 = *R2 & 0x00cf00cf; + if( !(*R2 & 0x00400000) && (*R2 & 0x000c0000) ) *R2 = *R2 | 0x00300000; + if( !(*R2 & 0x00000040) && (*R2 & 0x0000000c) ) *R2 = *R2 | 0x00000030; + // + *R2 = *R2 << 8; + //*R1 = 0x3c003c00; + //*R2 = 0x3c003c00; +} + +__device__ __forceinline__ u_int32_t MultScale(u_int32_t PackedFP16Pair, half Scale) { + half* FP16_1 = reinterpret_cast(&PackedFP16Pair); + half* FP16_2 = FP16_1 + 1; + uint32_t output; + half* output_half_ptr = reinterpret_cast(&output); + output_half_ptr[0] = __hmul( __hmul(*FP16_1,__float2half(4096.0f)), Scale); + output_half_ptr[1] = __hmul( __hmul(*FP16_2,__float2half(4096.0f)), Scale); + return output; +} + +__device__ __forceinline__ void Dequant_32FP6_4Way(u_int32_t __restrict__ Reg[][4], + u_int32_t __restrict__ *read_RPTR_Frag1, + u_int32_t __restrict__ *read_RPTR_Frag2, + u_int32_t *Scales) { + u_int32_t *OutputRegs = reinterpret_cast (Reg); + u_int32_t *Frag1_PTR = read_RPTR_Frag1; + u_int32_t *Frag2_PTR = read_RPTR_Frag2; + half *Scale_RPTR = reinterpret_cast(Scales); + u_int32_t Packed_FP6 = 0; + u_int32_t tmp = 0; + // Dequantizing 32 FP6, each Loop dequantizing 4 FP6 + #pragma unroll(8) + for(int i=0; i<8; i++) { + // Frag1 + Packed_FP6 = (*Frag1_PTR) & 0xc0c0c0c0; + if(i%4==3) Frag1_PTR++; + else (*Frag1_PTR) = (*Frag1_PTR) << 2; + // Frag2 + tmp = (*Frag2_PTR) & 0xf0f0f0f0; + tmp = tmp >> 2; + if(i%2==1) Frag2_PTR++; + else (*Frag2_PTR) = (*Frag2_PTR) << 4; + // Packed_FP6 + Packed_FP6 = Packed_FP6 | tmp; + // + FP6_FP16_Cast_4Way(&Packed_FP6, &tmp); + // + *OutputRegs = MultScale(Packed_FP6, Scale_RPTR[0] ); // Muliply FP16 scales + OutputRegs += 1; + *OutputRegs = MultScale(tmp, Scale_RPTR[1]); // Muliply FP16 scales + OutputRegs += 1; + // Updating offset for FP16 scales for every two iterations + if(i%2==1) Scale_RPTR += 2; + } + +} + +/* + * + */ +__device__ __forceinline__ void ExtractFromSharedToReg_Scales(uint32_t* Scales, half* WARP_SPTR_Scales) { + int lane_id = threadIdx.x % WARP_SIZE; + uint32_t* SPTR_uint = reinterpret_cast(WARP_SPTR_Scales); + uint32_t tmpReg = SPTR_uint[lane_id]; + #pragma unroll + for(int i=0; i<4; i++) { + // T __shfl_sync(unsigned mask, T var, int srcLane, int width=warpSize); + Scales[i] = __shfl_sync(0xffffffff, tmpReg, i, 4); + } +} + +#endif diff --git a/torchao/csrc/cuda/fp6_llm/weight_quant.cu b/torchao/csrc/cuda/fp6_llm/weight_quant.cu new file mode 100644 index 0000000000..d29f70be0c --- /dev/null +++ b/torchao/csrc/cuda/fp6_llm/weight_quant.cu @@ -0,0 +1,219 @@ +// Copyright 2024 FP6-LLM authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
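A note on the magic constants in the dequant path above: the 4096.0f multiplier in MultScale is the exponent-bias correction between this FP6 format and FP16, and the absmax/absmin bounds used in the tests follow directly from E3M2. A quick numeric check (illustrative Python, not part of the patch):

    # FP16 exponent bias is 15, E3M2's bias is 3; reading the FP6 exponent
    # bits in an FP16 slot undershoots by 2**(15-3), hence the * 4096
    assert 2 ** (15 - 3) == 4096

    # E3M2 extremes (exponent 0b111 is an ordinary value: no inf/nan)
    absmax = 2 ** (0b111 - 0b011) * (1 + 0.5 + 0.25)  # largest normal
    absmin = 2 ** (1 - 0b011) * 0.25                  # smallest subnormal
    assert (absmax, absmin) == (28.0, 0.0625)
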
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is adapted from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_quant.h +// and https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_dequant.h + +#include +#include +#include + +/* + * Function to pack 4 fake quantized FP16 value into continuously stored 4 FP6 values. + */ +void cast_fp16_fp6(uint16_t* FP16x4, uint8_t* FP6x4) +{ + // Constants for FP6 + constexpr int exponent_nbits_fp6 = 3; + constexpr int mantissa_nbits_fp6 = 2; + constexpr int exp_bias_fp6 = (1 << (exponent_nbits_fp6 - 1)) - 1; + // Constants for FP16 + constexpr int exponent_nbits_fp16 = 5; + constexpr int mantissa_nbits_fp16 = 10; + constexpr int exp_bias_fp16 = (1 << (exponent_nbits_fp16 - 1)) - 1; + + int fp6_temp[4]; + + float absmin_nonzero_fp6 = 0.0625; + // Note that we regard the exponent of '111' as a regular value rather than NaN or inf. This is + // the same with that in qtorch. + float absmax_fp6 = 28; + + for (int i = 0; i < 4; ++i) { + uint16_t source = FP16x4[i]; + float fp6_value_abs = std::abs(__half2float(*((half*)(&source)))); + if ((fp6_value_abs != 0 && fp6_value_abs < absmin_nonzero_fp6) || + fp6_value_abs > absmax_fp6) { + // TODO(zhen): a better way may be rounding it to the nearest FP6 value. + throw std::invalid_argument("Input value out of range for FP6."); + } + + // It is not safe to do shift operation on uint16_t. So we promote it to int. + int source_promote = int(source); + + int sign_bit = (source_promote >> 15); + // Extracting exponent represented in FP16. The sign mask 0x7FFF is '0111 1111 1111 1111' + int exp_bit = (source_promote & 0x7FFF) >> mantissa_nbits_fp16; + // Extracting mantissa represented in FP16 + int mant_bit = source_promote & ((1 << mantissa_nbits_fp16) - 1); + + int new_exp_bit; + int new_mant_bit; + + if (exp_bit == 0) { + // Subnormal FP16 number. Too small for FP6. + new_exp_bit = 0; + new_mant_bit = 0; + } else { + new_mant_bit = mant_bit >> (mantissa_nbits_fp16 - mantissa_nbits_fp6); + new_exp_bit = exp_bit - exp_bias_fp16 + exp_bias_fp6; + + // Deal with subnormal FP6 values. + int target_exp_val = exp_bit - exp_bias_fp16; + int min_fp6_exp_val = -exp_bias_fp6 + 1; + bool subnormal_fp6 = target_exp_val < min_fp6_exp_val; + if (subnormal_fp6) { + // TODO(zhen): add the rounding logic. + new_exp_bit = 0; + // The implicit 1 in the mantissa of FP16 is not present in subnormal FP6. Thus we + // need to add it + new_mant_bit = (new_mant_bit | (1 << mantissa_nbits_fp6)) >> + (min_fp6_exp_val - target_exp_val); + } + } + + fp6_temp[i] = (sign_bit << (exponent_nbits_fp6 + mantissa_nbits_fp6)) | + (new_exp_bit << mantissa_nbits_fp6) | new_mant_bit; + } + // Pack the values + FP6x4[0] = fp6_temp[0] << 2 | (fp6_temp[1] >> 4); + FP6x4[1] = (fp6_temp[1] & 0x0F) << 4 | (fp6_temp[2] >> 2); + FP6x4[2] = (fp6_temp[2] & 0x03) << 6 | fp6_temp[3]; +} + +/* + * Function to prepack FP16 weights into continuous FP6 values. + * + * Parameters: + * weight_16bit: input weight in FP16, size M*K + * weight_6bit: output weight in packed FP6, continuously stored, size M*K*6/8 + * M, K: the shape of the weight + */ +void weight_prepacking_fp16_to_fp6(uint16_t* weight_16bit, + uint8_t* weight_6bit_packed, + size_t M, + size_t K) +{ + // Every four 16-bit elements are packed into three 6-bit values (4*6bit == 3*8bit). 
+    if (K * 6 % 8 != 0) { throw std::invalid_argument("(K * 6 % 8) should be 0"); }
+    size_t K_fp6_packed = K * 6 / 8;
+    // #pragma omp parallel for
+    for (auto m = 0; m < M; m++) {
+        uint8_t* ptr_6bit = weight_6bit_packed + m * K_fp6_packed;
+        uint16_t* ptr_16bit = weight_16bit + m * K;
+        for (auto k = 0; k < K; k += 4) {
+            cast_fp16_fp6(ptr_16bit, ptr_6bit);
+            ptr_16bit += 4;
+            ptr_6bit += 3;
+        }
+    }
+}
+
+void DeQuantMatrix_FP6_To_FP16(half* A_16bit_h, unsigned char* A_6bit_h, size_t M, size_t K, half* scale) {
+    assert(M%64==0);  // Currently, M must be a multiple of 64.
+    assert(K%64==0);  // Currently, K must be a multiple of 64.
+    size_t TotalSizeInByte = M*K*6/8;
+    //
+    half* OutPTR = A_16bit_h;
+    for(size_t i=0; i<TotalSizeInByte/3; i++) {  // 4 FP6 values are stored in every 3 bytes.
+        unsigned char B1 = A_6bit_h[i*3+0] & 0xfc;
+        B1 = (B1&0x80) | ((B1>>2)&0x1f);
+        unsigned char B2 = (A_6bit_h[i*3+0]<<6) | ((A_6bit_h[i*3+1]>>2)&0xfc);
+        B2 = (B2&0x80) | ((B2>>2)&0x1f);
+        unsigned char B3 = (A_6bit_h[i*3+1]<<4) | ((A_6bit_h[i*3+2]>>4)&0xfc);
+        B3 = (B3&0x80) | ((B3>>2)&0x1f);
+        unsigned char B4 = A_6bit_h[i*3+2]<<2;
+        B4 = (B4&0x80) | ((B4>>2)&0x1f);
+        half FP1, FP2, FP3, FP4;
+        unsigned char *PTR1, *PTR2, *PTR3, *PTR4;
+        PTR1 = reinterpret_cast<unsigned char*>(&FP1);
+        PTR2 = reinterpret_cast<unsigned char*>(&FP2);
+        PTR3 = reinterpret_cast<unsigned char*>(&FP3);
+        PTR4 = reinterpret_cast<unsigned char*>(&FP4);
+        PTR1[0] = 0; PTR1[1] = B1;  // little endian for x86 CPUs
+        PTR2[0] = 0; PTR2[1] = B2;
+        PTR3[0] = 0; PTR3[1] = B3;
+        PTR4[0] = 0; PTR4[1] = B4;
+        // 4096.0f == 2^12 compensates for the exponent-bias difference between FP6 and FP16.
+        OutPTR[0] = __float2half_rn ( __half2float(FP1) * 4096.0f * __half2float(scale[(4*i)/K]) );
+        OutPTR[1] = __float2half_rn ( __half2float(FP2) * 4096.0f * __half2float(scale[(4*i)/K]) );
+        OutPTR[2] = __float2half_rn ( __half2float(FP3) * 4096.0f * __half2float(scale[(4*i)/K]) );
+        OutPTR[3] = __float2half_rn ( __half2float(FP4) * 4096.0f * __half2float(scale[(4*i)/K]) );
+        //
+        OutPTR += 4;
+    }
+}
+
+
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include <torch/library.h>
+
+namespace torchao {
+
+// https://github.com/microsoft/DeepSpeed/blob/0fc19b6a320cf8aa0a5f6c2b1fa310bae9a70d94/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp#L194
+at::Tensor fp16_to_fp6_cpu(at::Tensor fp16_tensor)
+{
+    TORCH_CHECK(fp16_tensor.dim() == 2, "weight must be 2-dimensional");
+    TORCH_CHECK(fp16_tensor.scalar_type() == torch::kFloat16, "weight must be FP16");
+    TORCH_CHECK(fp16_tensor.is_contiguous(), "weight must be contiguous");
+    TORCH_CHECK(fp16_tensor.device().type() == torch::kCPU, "weight must be on CPU");
+    auto M = fp16_tensor.size(0);
+    auto K = fp16_tensor.size(1);
+    TORCH_CHECK(K % 4 == 0, "K must be a multiple of 4");
+
+    // Pack weight from FP16 to FP6.
+    auto options = at::TensorOptions().dtype(torch::kUInt8).device(torch::kCPU);
+    auto packed_fp6_tensor = at::empty({M, K * 6 / 8}, options);
+    uint8_t* packed_fp6_ptr = packed_fp6_tensor.data_ptr<uint8_t>();
+
+    uint16_t* fake_fp6_ptr = reinterpret_cast<uint16_t*>(fp16_tensor.data_ptr<at::Half>());
+    weight_prepacking_fp16_to_fp6(fake_fp6_ptr, packed_fp6_ptr, M, K);
+
+    return packed_fp6_tensor;
+}
+
+/*
+ * Dequantize an FP6 matrix to an equivalent FP16 matrix using CPUs.
+ * A useful tool to construct input matrices for the FP16 GEMM baseline.
+ * [Input]
+ *     fp6_tensor:  int tensor of shape [OC, IC // 16 * 3];  // 3 INT32 words contain 16 FP6 weights.
+ *     fp16_scale:  half tensor of shape [OC];               // for row-wise quantization.
+ * [Output]
+ *     fp16_tensor: half tensor of shape [OC, IC].
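+ *     Note: fp16_scale[i] is the row-wise scale applied to all IC elements of output row i.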
+ */
+at::Tensor weight_matrix_dequant_cpu(at::Tensor fp6_tensor, at::Tensor fp16_scale)
+{
+    int OC = fp6_tensor.size(0);
+    TORCH_CHECK(fp6_tensor.size(1) % 3 == 0);
+    int IC = fp6_tensor.size(1) / 3 * 16;
+    TORCH_CHECK(fp16_scale.size(0) == OC);
+    //
+    auto fp6_tensor_ptr = reinterpret_cast<uint8_t*>(fp6_tensor.data_ptr<int>());
+    auto fp16_scale_ptr = reinterpret_cast<half*>(fp16_scale.data_ptr<at::Half>());
+    //
+    auto options = at::TensorOptions().dtype(at::kHalf).device(fp16_scale.device());
+    at::Tensor fp16_tensor = at::empty({OC, IC}, options);
+    auto fp16_tensor_ptr = reinterpret_cast<half*>(fp16_tensor.data_ptr<at::Half>());
+    //
+    DeQuantMatrix_FP6_To_FP16(fp16_tensor_ptr, fp6_tensor_ptr, OC, IC, fp16_scale_ptr);
+    //
+    return fp16_tensor;
+}
+
+TORCH_LIBRARY_IMPL(torchao, CPU, m) {
+  m.impl("torchao::fp16_to_fp6", &fp16_to_fp6_cpu);
+  m.impl("torchao::fp6_weight_dequant", &weight_matrix_dequant_cpu);
+}
+
+}
diff --git a/torchao/csrc/fp6_llm/README.md b/torchao/csrc/fp6_llm/README.md
new file mode 100644
index 0000000000..ff764cc27d
--- /dev/null
+++ b/torchao/csrc/fp6_llm/README.md
@@ -0,0 +1,7 @@
+# FP6-LLM kernel
+
+This kernel is adapted from https://github.com/usyd-fsalab/fp6_llm. It performs the linear op (A @ W.T), where A is in FP16 and W is in FP6 (E3M2 without infinities and NaN).
+
+On most hardware, this kernel is faster than FP16 linear for batch sizes from 1 to 128, and slower for batch sizes of 256 or larger. See https://github.com/usyd-fsalab/fp6_llm/issues/8 for a detailed discussion.
+
+See https://github.com/pytorch/ao/pull/223 for some benchmark results.
diff --git a/torchao/csrc/fp6_llm/fp6_llm.cpp b/torchao/csrc/fp6_llm/fp6_llm.cpp
new file mode 100644
index 0000000000..794c79df11
--- /dev/null
+++ b/torchao/csrc/fp6_llm/fp6_llm.cpp
@@ -0,0 +1,11 @@
+#include <torch/extension.h>
+#include <torch/library.h>
+#include <torch/types.h>
+
+TORCH_LIBRARY_FRAGMENT(torchao, m) {
+  m.impl_abstract_pystub("torchao.ops");
+  m.def("fp16act_fp6weight_linear(Tensor _in_feats, Tensor _weights, Tensor _scales, int splitK) -> Tensor");
+  m.def("prepack_fp6_weight(Tensor fp6_tensor) -> Tensor");
+  m.def("fp16_to_fp6(Tensor fp16_tensor) -> Tensor");
+  m.def("fp6_weight_dequant(Tensor fp6_tensor, Tensor fp16_scale) -> Tensor");
+}
diff --git a/torchao/csrc/fp6_llm/weight_prepacking.cpp b/torchao/csrc/fp6_llm/weight_prepacking.cpp
new file mode 100644
index 0000000000..89a1171f5e
--- /dev/null
+++ b/torchao/csrc/fp6_llm/weight_prepacking.cpp
@@ -0,0 +1,220 @@
+// Copyright 2024 FP6-LLM authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is adapted from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_prepacking.h
+
+#include <assert.h>
+#include <vector>
+#include <stdint.h>
+
+using namespace std;
+
+void Padding_8_FP6_To_8_Bytes(unsigned char Padded_FP6[], unsigned char* FP6_Array)  // padding 0 to the lowest bit location
+{
+    Padded_FP6[0] = FP6_Array[0] & 0xfc;
+    Padded_FP6[1] = (FP6_Array[0]<<6) | ((FP6_Array[1]>>2) & 0xfc);
+    Padded_FP6[2] = (FP6_Array[1]<<4) | ((FP6_Array[2]>>4) & 0xfc);
+    Padded_FP6[3] = FP6_Array[2]<<2;
+    Padded_FP6[4] = FP6_Array[3] & 0xfc;
+    Padded_FP6[5] = (FP6_Array[3]<<6) | ((FP6_Array[4]>>2) & 0xfc);
+    Padded_FP6[6] = (FP6_Array[4]<<4) | ((FP6_Array[5]>>4) & 0xfc);
+    Padded_FP6[7] = FP6_Array[5]<<2;
+}
+
+unsigned char Extract_2_Bits_From_4_PaddedFP6(unsigned char B1, unsigned char B2, unsigned char B3, unsigned char B4)
+{
+    unsigned char out;
+    out = (B1&0xc0) | ( (B2&0xc0) >> 2 ) | ( (B3&0xc0) >> 4 ) | ( (B4&0xc0) >> 6 );
+    return out;
+}
+
+unsigned char Extract_4_Bits_From_2_PaddedFP6(unsigned char B1, unsigned char B2)  // The highest two bits are already extracted by Extract_2_Bits_From_4_PaddedFP6();
+{
+    unsigned char out;
+    out = ( (B1<<2) & 0xf0 ) | ( (B2>>2) & 0x0f );
+    return out;
+}
+
+// dealing with 4 1*8 blocks of FP6
+void Assign_32_FP6_To_4_Thread(vector<unsigned char> Seg_2bit[], vector<unsigned char> Seg_4bit[], unsigned char* PTR_1, unsigned char* PTR_2, unsigned char* PTR_3, unsigned char* PTR_4)
+{
+    unsigned char Padded_8_FP8[4][8];
+    Padding_8_FP6_To_8_Bytes(Padded_8_FP8[0], PTR_1);
+    Padding_8_FP6_To_8_Bytes(Padded_8_FP8[1], PTR_2);
+    Padding_8_FP6_To_8_Bytes(Padded_8_FP8[2], PTR_3);
+    Padding_8_FP6_To_8_Bytes(Padded_8_FP8[3], PTR_4);
+    //
+    unsigned char Seg1_Byte1_T[4];
+    unsigned char Seg1_Byte2_T[4];
+    unsigned char Seg2_Byte1_T[4];
+    unsigned char Seg2_Byte2_T[4];
+    unsigned char Seg2_Byte3_T[4];
+    unsigned char Seg2_Byte4_T[4];
+    for(int t=0; t<4; t++)
+    {
+        Seg1_Byte1_T[t] = Extract_2_Bits_From_4_PaddedFP6(Padded_8_FP8[0][0+t*2], Padded_8_FP8[0][1+t*2], Padded_8_FP8[1][0+t*2], Padded_8_FP8[1][1+t*2]);
+        Seg1_Byte2_T[t] = Extract_2_Bits_From_4_PaddedFP6(Padded_8_FP8[2][0+t*2], Padded_8_FP8[2][1+t*2], Padded_8_FP8[3][0+t*2], Padded_8_FP8[3][1+t*2]);
+        Seg2_Byte1_T[t] = Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[0][0+t*2], Padded_8_FP8[0][1+t*2]);
+        Seg2_Byte2_T[t] = Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[1][0+t*2], Padded_8_FP8[1][1+t*2]);
+        Seg2_Byte3_T[t] = Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[2][0+t*2], Padded_8_FP8[2][1+t*2]);
+        Seg2_Byte4_T[t] = Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[3][0+t*2], Padded_8_FP8[3][1+t*2]);
+    }
+    //
+    for(int t=0; t<4; t++)
+    {
+        Seg_2bit[t].push_back(Seg1_Byte1_T[t]);
+        Seg_2bit[t].push_back(Seg1_Byte2_T[t]);
+        Seg_4bit[t].push_back(Seg2_Byte1_T[t]);
+        Seg_4bit[t].push_back(Seg2_Byte2_T[t]);
+        Seg_4bit[t].push_back(Seg2_Byte3_T[t]);
+        Seg_4bit[t].push_back(Seg2_Byte4_T[t]);
+    }
+    return;
+}
+
+void BitInterleaving_2bit(unsigned char* PTR_4Bytes)
+{
+    unsigned int *PTR_UINT = reinterpret_cast<unsigned int*>(PTR_4Bytes);
+    unsigned int input = *PTR_UINT;
+    //
+    //int order_2bit[16] = {1,5,9,13,3,7,11,15,2,6,10,14,4,8,12,16};  // pre-defined order for bit-interleaving in FP6-LLM
+    int order_2bit[16] = {2,6,10,14,4,8,12,16,1,5,9,13,3,7,11,15};    // pre-defined order for bit-interleaving in FP6-LLM
+    unsigned int Frags_2bit[16];  // The highest 2 bits are used to store the extracted fragments.
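+    // Each iteration below shifts the selected 2-bit field into bits 31:30 and then packs
+    // all 16 fragments back into a single 32-bit word in the register-friendly order above.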
+    for(int i=0; i<16; i++)
+        Frags_2bit[i] = ( input << 2*(order_2bit[i]-1) ) & 0xc0000000;
+    //
+    unsigned int output = 0x00000000;
+    for(int i=0; i<16; i++)
+        output |= ( Frags_2bit[i] >> (i*2) );
+    //
+    *PTR_UINT = output;
+}
+
+void BitInterleaving_4bit(unsigned char* PTR_4Bytes)
+{
+    unsigned int *PTR_UINT = reinterpret_cast<unsigned int*>(PTR_4Bytes);
+    unsigned int input = *PTR_UINT;
+    //
+    //int order_4bit[8] = {1,5,3,7,2,6,4,8};  // pre-defined order for bit-interleaving in FP6-LLM
+    int order_4bit[8] = {2,6,4,8,1,5,3,7};    // pre-defined order for bit-interleaving in FP6-LLM
+    unsigned int Frags_4bit[8];  // The highest 4 bits are used to store the extracted fragments.
+    for(int i=0; i<8; i++)
+        Frags_4bit[i] = ( input << 4*(order_4bit[i]-1) ) & 0xf0000000;
+    //
+    unsigned int output = 0x00000000;
+    for(int i=0; i<8; i++)
+        output |= ( Frags_4bit[i] >> (i*4) );
+    //
+    *PTR_UINT = output;
+}
+
+/*
+ * Inputs:
+ * (1) unsigned char Weight_6bit [M*K*6/8]
+ * Outputs:
+ * (1) unsigned char Weight_2bit [M*K*2/8]
+ * (2) unsigned char Weight_4bit [M*K*4/8]
+ *
+ * Assumption: Weight_6bit, Weight_2bit, Weight_4bit all stored continuously in row-major.
+ * 8 FP6 = 6 Bytes
+ * 8 FP4 = 4 Bytes
+ * 8 FP2 = 2 Bytes
+ */
+void weight_matrix_prepacking(int* packed_weights, int *FP6Weights, size_t M, size_t K)
+{
+    assert(M % 64 == 0);
+    assert(K % 64 == 0);
+    //
+    unsigned char* Weight_6bit = reinterpret_cast<unsigned char*>(FP6Weights);
+    unsigned char* Weight_2bit = reinterpret_cast<unsigned char*>(packed_weights);
+    unsigned char* Weight_4bit = Weight_2bit + M*K*2/8;
+    //
+    vector<unsigned char> A_Segment_2bit[32];
+    vector<unsigned char> A_Segment_4bit[32];
+    //
+    size_t BytesPerRow = K*6/8;
+    // Pass-1: (1) 2+4 split; (2) assign weights to 32 threads.
+    for (size_t i = 0; i < M / 64; i++)
+    {
+        for (size_t j = 0; j < K / 16; j++)
+        {
+            for(size_t k=0; k<64/16; k++)
+            {
+                size_t row = i*64 + k*16;
+                size_t col = j*16;
+                unsigned char* StartPTR_1 = Weight_6bit + row*BytesPerRow + col*6/8;
+                unsigned char* StartPTR_2 = StartPTR_1 + 8*BytesPerRow;
+                unsigned char* StartPTR_3 = StartPTR_1 + 8*6/8;
+                unsigned char* StartPTR_4 = StartPTR_2 + 8*6/8;
+                // Dealing with each 16*16 blocks then...
+                for(int l=0; l<8; l++) Assign_32_FP6_To_4_Thread(&A_Segment_2bit[l*4], &A_Segment_4bit[l*4], StartPTR_1+l*BytesPerRow, StartPTR_2+l*BytesPerRow, StartPTR_3+l*BytesPerRow, StartPTR_4+l*BytesPerRow);
+            }
+        }
+    }
+    // Verifying the length of 2_bit segments and 4_bit segments
+    size_t BytesPerThread_2bit = M*K*2/8/32;
+    size_t BytesPerThread_4bit = M*K*4/8/32;
+    for(int i=0; i<32; i++)
+    {
+        assert(A_Segment_2bit[i].size()==BytesPerThread_2bit);
+        assert(A_Segment_4bit[i].size()==BytesPerThread_4bit);
+    }
+    // Pass-2: Optimizing coalesced global memory access
+    for(size_t i=0; i<BytesPerThread_2bit/4; i++)
+        for(int t=0; t<32; t++)
+            for(int b=0; b<4; b++)
+                Weight_2bit[i*128+t*4+(3-b)] = A_Segment_2bit[t][i*4+b];  // why (3-b): special byte order within a register
+    for(size_t i=0; i<BytesPerThread_4bit/4; i++)
+        for(int t=0; t<32; t++)
+            for(int b=0; b<4; b++)
+                Weight_4bit[i*128+t*4+(3-b)] = A_Segment_4bit[t][i*4+b];  // why (3-b): special byte order within a register
+    // Pass-3: Bit-level interleaving
+    for(size_t i=0; i<BytesPerThread_2bit*32/4; i++)
+        BitInterleaving_2bit(Weight_2bit+4*i);
+    for(size_t i=0; i<BytesPerThread_4bit*32/4; i++)
+        BitInterleaving_4bit(Weight_4bit+4*i);
+}
+
+#include <torch/extension.h>
+#include <torch/library.h>
+
+namespace torchao {
+
+/*
+ * Weight prepacking (Pytorch interface).
+ * [Input]
+ *     fp6_tensor: int tensor of shape [OC, IC // 16 * 3];  // 3 INT32 words contain 16 FP6 weights.
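+ *                 (16 FP6 values * 6 bits = 96 bits = 3 * 32-bit words)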
+ * [Output]
+ *     packed_tensor: int tensor of shape [OC, IC // 16 * 3];
+ */
+at::Tensor weight_matrix_prepacking_cpu(at::Tensor fp6_tensor)
+{
+    size_t OC = fp6_tensor.size(0);
+    size_t IC = fp6_tensor.size(1);
+    TORCH_CHECK(IC % 3 == 0, "Expect packed input dim % 3 == 0, but receive ", IC, " instead.");
+    IC = IC * 16 / 3;
+    TORCH_CHECK((OC % 256 == 0) && (IC % 64 == 0), "Expect output dim % 256 == 0 and input dim % 64 == 0, but receive ", OC, " and ", IC, " instead.");
+    auto packed_tensor = at::empty_like(fp6_tensor);
+    auto packed_tensor_ptr = reinterpret_cast<int*>(packed_tensor.data_ptr<int>());
+    auto fp6_tensor_ptr = reinterpret_cast<int*>(fp6_tensor.data_ptr<int>());
+    weight_matrix_prepacking(packed_tensor_ptr, fp6_tensor_ptr, OC, IC);
+    return packed_tensor;
+}
+
+TORCH_LIBRARY_IMPL(torchao, CPU, m) {
+  m.impl("torchao::prepack_fp6_weight", &weight_matrix_prepacking_cpu);
+}
+
+}
diff --git a/torchao/ops.py b/torchao/ops.py
index 0931d32026..3a25dbf6db 100644
--- a/torchao/ops.py
+++ b/torchao/ops.py
@@ -21,3 +21,88 @@ def _(dets, scores, iou_threshold):
     ctx = torch._custom_ops.get_ctx()
     num_to_keep = ctx.create_unbacked_symint()
     return dets.new_empty(num_to_keep, dtype=torch.long)
+
+
+def prepack_fp6_weight(fp6_weight: Tensor) -> Tensor:
+    """
+    Pack FP6 tensor in a layout for use with FP6-LLM. See https://arxiv.org/abs/2401.14112 for more details.
+
+    Arguments
+        fp6_weight: tightly-packed fp6_weight, inside a `torch.int32` container
+
+    Returns
+        packed FP6 tensor for use with FP6-LLM, inside a `torch.int32` container
+    """
+    return torch.ops.torchao.prepack_fp6_weight.default(fp6_weight)
+
+
+@torch.library.impl_abstract("torchao::prepack_fp6_weight")
+def _(fp6_weight):
+    torch._check(fp6_weight.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp6_weight.dim()}D")
+    return torch.empty_like(fp6_weight)
+
+
+def fp16_to_fp6(fp16_tensor: Tensor) -> Tensor:
+    """
+    Pack FP16 tensor (containing only FP6 values) into FP6 tensor.
+    """
+    return torch.ops.torchao.fp16_to_fp6.default(fp16_tensor)
+
+
+@torch.library.impl_abstract("torchao::fp16_to_fp6")
+def _(fp16_tensor):
+    torch._check(fp16_tensor.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp16_tensor.dim()}D")
+    torch._check(fp16_tensor.dtype is torch.float16, lambda: f"weight must be FP16, got {fp16_tensor.dtype}")
+    M, K = fp16_tensor.shape
+    torch._check(K % 4 == 0, lambda: f"second dimension must be a multiple of 4, got {K}")
+    return torch.empty((M, K * 6 // 8), dtype=torch.uint8, device=fp16_tensor.device)
+
+
+def fp16act_fp6weight_linear(_in_feats: Tensor, _weights: Tensor, _scales: Tensor, splitK: int = 1) -> Tensor:
+    """
+    FP6-LLM linear layer A @ W.T. See https://arxiv.org/abs/2401.14112 for more details.
+
+    Arguments
+        _in_feats: input activations in FP16
+        _weights: packed FP6 weights.
See :func:prepack_fp6_weight and :func:fp16_to_fp6 + _scales: scale + splitK: split K + + Returns + output of linear layer + """ + return torch.ops.torchao.fp16act_fp6weight_linear.default(_in_feats, _weights, _scales, splitK) + + +@torch.library.impl_abstract("torchao::fp16act_fp6weight_linear") +def _(_in_feats, _weights, _scales, splitK = 1): + torch._check(_in_feats.dim() == 2, lambda: f"input should be a 2d tensor, got {_in_feats.dim()}D") + torch._check(_in_feats.dtype is torch.float16, lambda: f"weight must be FP16, got {_in_feats.dtype}") + torch._check(_weights.dim() == 2, lambda: f"weight should be a 2d tensor, got {_weights.dim()}D") + torch._check(_weights.dtype is torch.int32, lambda: f"weight must be INT32, got {_weights.dtype}") + torch._check(_scales.dim() == 1, lambda: f"scale should be a 2d tensor, got {_scales.dim()}D") + torch._check(_scales.dtype is torch.float16, lambda: f"scale must be FP16, got {_scales.dtype}") + + BS, IC = _in_feats.shape + OC, _ = _weights.shape + torch._check(IC / 16 * 3 == _weights.shape[1], lambda: "Dimensions mismatched") + torch._check(OC == _scales.shape[0], lambda: "Dimensions mismatched") + + return _in_feats.new_empty((BS, OC)) + + +def fp6_weight_dequant(fp6_tensor: Tensor, fp16_scale: Tensor) -> Tensor: + return torch.ops.torchao.fp6_weight_dequant.default(fp6_tensor, fp16_scale) + + +@torch.library.impl_abstract("torchao::fp6_weight_dequant") +def _(fp6_tensor, fp16_scale): + torch._check(fp6_tensor.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp6_tensor.dim()}D") + torch._check(fp6_tensor.dtype is torch.int32, lambda: f"weight must be INT32, got {fp6_tensor.dtype}") + torch._check(fp16_scale.dim() == 1, lambda: f"scale should be a 2d tensor, got {fp16_scale.dim()}D") + torch._check(fp16_scale.dtype is torch.float16, lambda: f"scale must be FP16, got {fp16_scale.dtype}") + + OC, _IC = fp6_tensor.shape + torch._check(OC == fp16_scale.shape[0], lambda: "Dimensions mismatched") + + return fp16_scale.new_empty((OC, _IC * 16 // 3)) From 6af40e5a2055fc4d8987e49fbc44bc5588fa9a23 Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Tue, 14 May 2024 17:22:00 -0400 Subject: [PATCH 30/61] Update and rename nightly-build.yml to build.yml (#240) * Update and rename nightly-build.yml to build.yml * Update build.yml * Update build.yml * Update build.yml * Update build.yml * Update build.yml * Update build.yml * Update build.yml --- .github/workflows/build.yml | 59 +++++++++++++++++++++++++++++ .github/workflows/nightly-build.yml | 31 --------------- 2 files changed, 59 insertions(+), 31 deletions(-) create mode 100644 .github/workflows/build.yml delete mode 100644 .github/workflows/nightly-build.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000000..4248a1ebbf --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,59 @@ +name: PyPI Nightly Build + +on: + schedule: + - cron: '0 0 * * *' # Runs at midnight UTC every day + workflow_dispatch: + inputs: + build-type: + description: 'Choose build type: nightly or release' + required: true + default: 'release' + options: + - nightly + - release + ref: + description: 'Branch or tag name' + required: false + default: 'main' + +jobs: + build-and-publish: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.inputs.ref }} + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip 
install setuptools wheel twine + pip install -r requirements.txt + - name: Build package + run: | + if [ "${{ github.event_name }}" = "schedule" ]; then + export TORCHAO_NIGHTLY=1 + elif [ "${{ github.event.inputs['build-type'] }}" = "nightly" ]; then + export TORCHAO_NIGHTLY=1 + fi + pip install . + - name: Publish package to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} + repository_url: https://upload.pypi.org/legacy/ + packages_dir: dist/ + + - name: Open issue on failure + if: ${{ failure() && github.event_name == 'schedule' }} + uses: dacbd/create-issue-action@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + title: Nightly Build failed + body: Commit ${{ github.sha }} daily scheduled [CI run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) failed, please check why + assignees: '' diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml deleted file mode 100644 index 45b89bb1db..0000000000 --- a/.github/workflows/nightly-build.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: PyPI Nightly Build - -on: - schedule: - - cron: '0 0 * * *' # Runs at midnight UTC every day - workflow_dispatch: - -jobs: - build-and-publish: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.x' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine - - name: Build package - run: | - export TORCHAO_NIGHTLY=1 - python setup.py sdist bdist_wheel - - name: Publish package to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} - repository_url: https://upload.pypi.org/legacy/ - packages_dir: dist/ From 8059da60517aed9f43f3d775b6a395c9d1c627b6 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Tue, 14 May 2024 15:26:29 -0700 Subject: [PATCH 31/61] Fix CI after do_bench refactor in pytorch inductor (#242) Summary: We are relying on some private APIs from inductor and a recent refactor: https://github.com/pytorch/pytorch/pull/125736 broken the do_bench API we rely on for autoquant, maybe we should use our own do_bench or rely on triton's directly? 
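As a rough sketch (hypothetical helper, not code from this patch), such a fallback
could probe the known locations from newest to oldest and only then reach for triton:

    def get_do_bench():
        # Inductor has moved its benchmarking helpers across releases, so try the
        # newer private location first, then the older one, then triton's utility.
        try:
            from torch._inductor.runtime.runtime_utils import do_bench  # newer layout
            return do_bench
        except ImportError:
            pass
        try:
            from torch._inductor.utils import do_bench  # older layout
            return do_bench
        except ImportError:
            from triton.testing import do_bench  # triton's public benchmarking helper
            return do_bench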
Test Plan: regression tests python test/integration/test_integration.py -k test_autoquant_one_input_29_cuda Reviewers: Subscribers: Tasks: Tags: --- torchao/quantization/autoquant.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/torchao/quantization/autoquant.py b/torchao/quantization/autoquant.py index fc38c04169..4331d9b042 100644 --- a/torchao/quantization/autoquant.py +++ b/torchao/quantization/autoquant.py @@ -15,6 +15,8 @@ except: from torch._inductor.runtime.runtime_utils import do_bench +from .utils import TORCH_VERSION_AFTER_2_4 + aten = torch.ops.aten AUTOQUANT_CACHE = {} @@ -197,7 +199,11 @@ def do_autoquant_bench(op, *args, **kwargs): graph = torch.cuda.CUDAGraph() with torch.cuda.graph(graph, stream=stream): op(*args, **kwargs) - res = do_bench(lambda: graph.replay(), warmup=warmup, rep=rep, return_mode="median") + if TORCH_VERSION_AFTER_2_4: + from torch._inductor.runtime.runtime_utils import do_bench_gpu + res = do_bench_gpu(lambda: graph.replay(), warmup=warmup, rep=rep, return_mode="median") + else: + res = do_bench(lambda: graph.replay(), warmup=warmup, rep=rep, return_mode="median") return res def _is_interpolate_mode(mode): From e7bbbd2d3ca9713f150178d4cf043cd758d7004f Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Tue, 14 May 2024 18:37:25 -0400 Subject: [PATCH 32/61] python setup.py in build scripts (#241) * python setup.py in build scripts * push * Update build.yml --- .github/workflows/build.yml | 44 +++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4248a1ebbf..8678f4adbf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,18 +12,12 @@ on: options: - nightly - release - ref: - description: 'Branch or tag name' - required: false - default: 'main' jobs: build-and-publish: - runs-on: ubuntu-latest + runs-on: linux.g5.12xlarge.nvidia.gpu steps: - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.ref }} - name: Set up Python uses: actions/setup-python@v4 with: @@ -33,6 +27,23 @@ jobs: python -m pip install --upgrade pip pip install setuptools wheel twine pip install -r requirements.txt + + - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main + with: + runner: ${{ matrix.runs-on }} + gpu-arch-type: "cuda" + gpu-arch-version: "12.1" + script: | + conda create -n venv python=3.9 -y + conda activate venv + echo "::group::Install newer objcopy that supports --set-section-alignment" + yum install -y devtoolset-10-binutils + export PATH=/opt/rh/devtoolset-10/root/usr/bin/:$PATH + python -m pip install --upgrade pip + pip install torch + pip install -r requirements.txt + pip install -r dev-requirements.txt + - name: Build package run: | if [ "${{ github.event_name }}" = "schedule" ]; then @@ -40,7 +51,8 @@ jobs: elif [ "${{ github.event.inputs['build-type'] }}" = "nightly" ]; then export TORCHAO_NIGHTLY=1 fi - pip install . 
+ python setup.py sdist bdist_wheel + pytest test --verbose -s - name: Publish package to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: @@ -49,11 +61,11 @@ jobs: repository_url: https://upload.pypi.org/legacy/ packages_dir: dist/ - - name: Open issue on failure - if: ${{ failure() && github.event_name == 'schedule' }} - uses: dacbd/create-issue-action@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - title: Nightly Build failed - body: Commit ${{ github.sha }} daily scheduled [CI run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) failed, please check why - assignees: '' + # - name: Open issue on failure + # if: ${{ failure() && github.event_name == 'schedule' }} + # uses: dacbd/create-issue-action@v1 + # with: + # token: ${{ secrets.GITHUB_TOKEN }} + # title: Nightly Build failed + # body: Commit ${{ github.sha }} daily scheduled [CI run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) failed, please check why + # assignees: '' From 10da375e52eaea8240b963f90845569c5e735c2b Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Tue, 14 May 2024 16:14:30 -0700 Subject: [PATCH 33/61] Enable dispatch to tinygemm int4 and int8 kernels for quantized tensor (#230) Summary: This adds some dispatch to the tinygemm kernels for cuda, although need to resolve implementation mismatch problem for tinygemm first Test Plan: python test/quantization/test_quant_api.py -k test_quantized_tensor_subclass_int4 python test/quantization/test_quant_api.py -k test_quantized_tensor_subclass_int8 Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_api.py | 105 +++++++++++++++++++-- test/quantization/test_quant_primitives.py | 11 +-- torchao/quantization/autoquant.py | 1 + torchao/quantization/quant_primitives.py | 85 +++++++++++++---- torchao/quantization/subclass.py | 84 +++++++++++++++-- 5 files changed, 243 insertions(+), 43 deletions(-) diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index 10d36f0c1b..cea659e61d 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -9,7 +9,6 @@ import unittest import torch import os -from torch._export import capture_pre_autograd_graph from torch.ao.quantization.quantize_pt2e import ( prepare_pt2e, convert_pt2e, @@ -36,7 +35,7 @@ def dynamic_quant(model, example_inputs): - m = capture_pre_autograd_graph(model, example_inputs) + m = torch.export.export(model, example_inputs).module() quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config(is_dynamic=True)) m = prepare_pt2e(m, quantizer) m = convert_pt2e(m) @@ -50,14 +49,14 @@ def _apply_dynamic_quant(model): """ _replace_with_custom_fn_if_matches_filter( model, - lambda linear_mod: dynamic_quant(linear_mod, (torch.randn(1, linear_mod.in_features))), + lambda linear_mod: dynamic_quant(linear_mod, (torch.randn(1, linear_mod.in_features),)), lambda mod, fqn: isinstance(mod, torch.nn.Linear), ) return model def capture_and_prepare(model, example_inputs): - m = capture_pre_autograd_graph(model, example_inputs) + m = torch.export.export(model, example_inputs) quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config(is_dynamic=True)) m = prepare_pt2e(m, quantizer) # TODO: we can run the weight observer in convert_pt2e so that user don't need to run this @@ -88,13 +87,13 @@ def quantize(self, model: torch.nn.Module) -> torch.nn.Module: return model class ToyLinearModel(torch.nn.Module): - def __init__(self): + def __init__(self, m=64, 
n=32, k=64): super().__init__() - self.linear1 = torch.nn.Linear(64, 32, bias=False).to(torch.float) - self.linear2 = torch.nn.Linear(32, 64, bias=False).to(torch.float) + self.linear1 = torch.nn.Linear(m, n, bias=False).to(torch.float) + self.linear2 = torch.nn.Linear(n, k, bias=False).to(torch.float) def example_inputs(self): - return (torch.randn(1, 64).to(torch.float),) + return (torch.randn(1, self.linear1.in_features).to(torch.float),) def forward(self, x): x = self.linear1(x) @@ -104,8 +103,9 @@ def forward(self, x): class TestQuantFlow(unittest.TestCase): def test_dynamic_quant_gpu_singleline(self): m = ToyLinearModel().eval() + example_inputs = m.example_inputs() m = _apply_dynamic_quant(m) - quantized = m(*m.example_inputs()) + quantized = m(*example_inputs) # AssertionError: Expecting input to have dtype torch.float32, but got dtype: torch.float64 # While executing %choose_qparams_tensor_1 : [num_users=2] = call_function[target=torch.ops.quantized_decomposed.choose_qparams.tensor](args = (%arg0_3, -128, 127, 0.000244140625, torch.int8), kwargs = {}) # m = torch.compile(m, mode="max-autotune") @@ -442,7 +442,94 @@ def get_per_token_block_size(x): ref = m_copy(*example_inputs) self.assertTrue(torch.equal(res, ref)) + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + def test_quantized_tensor_subclass_int4(self): + from torchao.quantization.subclass import AffineQuantizedTensor + from torchao.quantization.quant_primitives import MappingType + from torchao.quantization.quant_primitives import ZeroPointDomain + import copy + + # weight settings + groupsize = 32 + mapping_type = MappingType.ASYMMETRIC + block_size = (1, groupsize) + target_dtype = torch.int32 + quant_min = 0 + quant_max = 15 + eps = 1e-6 + preserve_zero = False + zero_point_dtype = torch.bfloat16 + + # weight only quantization + input_quant_func = None + + # use 1024 so that we don't need padding + m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") + m_copy = copy.deepcopy(m) + example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs())) + + def to_quantized(weight): + return AffineQuantizedTensor.from_float( + weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, + zero_point_dtype=zero_point_dtype, + preserve_zero=preserve_zero, + zero_point_domain=ZeroPointDomain.FLOAT, + input_quant_func=input_quant_func, + ) + + m.linear1.weight = torch.nn.Parameter(to_quantized(m.linear1.weight), requires_grad=False) + m.linear2.weight = torch.nn.Parameter(to_quantized(m.linear2.weight), requires_grad=False) + assert isinstance(m.linear1.weight, AffineQuantizedTensor) + assert isinstance(m.linear2.weight, AffineQuantizedTensor) + + # reference + from torchao.quantization.quant_api import change_linear_weights_to_int4_woqtensors + change_linear_weights_to_int4_woqtensors(m_copy, groupsize=groupsize) + + res = m(*example_inputs) + ref = m_copy(*example_inputs) + + self.assertTrue(torch.equal(res, ref)) + + + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + def test_quantized_tensor_subclass_int8(self): + from torchao.quantization.subclass import AffineQuantizedTensor + from torchao.quantization.quant_primitives import MappingType + import copy + + # weight settings + mapping_type = MappingType.SYMMETRIC + target_dtype = torch.int8 + eps = 
torch.finfo(torch.float32).eps + zero_point_dtype = torch.int64 + + # weight only quantization + input_quant_func = None + + m = ToyLinearModel().eval().to(torch.bfloat16) + m_copy = copy.deepcopy(m) + example_inputs = tuple(map(lambda x: x.to(torch.bfloat16), m.example_inputs())) + + def to_quantized(weight): + block_size = (1, weight.shape[1]) + return AffineQuantizedTensor.from_float(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype, input_quant_func=input_quant_func) + + m.linear1.weight = torch.nn.Parameter(to_quantized(m.linear1.weight), requires_grad=False) + m.linear2.weight = torch.nn.Parameter(to_quantized(m.linear2.weight), requires_grad=False) + assert isinstance(m.linear1.weight, AffineQuantizedTensor) + assert isinstance(m.linear2.weight, AffineQuantizedTensor) + + # reference + from torchao.quantization.quant_api import change_linear_weights_to_int8_woqtensors + change_linear_weights_to_int8_woqtensors(m_copy) + + res = m(*example_inputs) + ref = m_copy(*example_inputs) + torch.testing.assert_close(res, ref, rtol=0.00001, atol=1e-2) if __name__ == "__main__": diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py index 291039e42a..a64439a25e 100644 --- a/test/quantization/test_quant_primitives.py +++ b/test/quantization/test_quant_primitives.py @@ -327,6 +327,8 @@ def test_not_preserve_zero_not_supported(self): def test_tinygemm_get_groupwise_affine_qparams(self): + from torchao.quantization.quant_primitives import ZeroPointDomain + input = torch.randn(10, 256) n_bit = 4 scale_ref, zero_point_ref = get_groupwise_affine_qparams(input, n_bit=n_bit, groupsize=128, dtype=torch.bfloat16) @@ -351,16 +353,11 @@ def test_tinygemm_get_groupwise_affine_qparams(self): scale_dtype=scale_dtype, zero_point_dtype=zero_point_dtype, preserve_zero=False, + zero_point_domain=ZeroPointDomain.FLOAT, ) - def int_zero_point_to_float(zero_point, scale, qaunt_min, mid_point): - return (quant_min - zero_point + mid_point) * scale - - mid_point = 2 ** (n_bit - 1) - zero_point_float = int_zero_point_to_float(zero_point, scale, quant_min, mid_point) - self.assertTrue(torch.equal(scale, scale_ref)) - torch.testing.assert_close(zero_point_float, zero_point_ref, rtol=0.00001, atol=torch.max(scale)*0.03) + self.assertTrue(torch.equal(zero_point, zero_point_ref)) if __name__ == "__main__": diff --git a/torchao/quantization/autoquant.py b/torchao/quantization/autoquant.py index 4331d9b042..4c0ae53ce8 100644 --- a/torchao/quantization/autoquant.py +++ b/torchao/quantization/autoquant.py @@ -9,6 +9,7 @@ quantize_activation_per_token_absmax, safe_int_mm, ) +from .utils import TORCH_VERSION_AFTER_2_4 import torch.nn.functional as F try: from torch._inductor.utils import do_bench diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index 3975284b61..4f39a6055d 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -72,6 +72,14 @@ def guard_dtype_size(tensor_arg, arg_name, dtype=None, size=None): torch.uint7: (0, 2**7-1), }) +class MappingType(Enum): + SYMMETRIC = 0 + ASYMMETRIC = 1 + +class ZeroPointDomain(Enum): + INT = 0 + FLOAT = 1 + # TODO: decide on if we want to allow custom quant_min/quant_max here def _get_and_check_qmin_qmax(dtype, quant_min, quant_max): """Get quant_min and quant_max args based on dtype and also @@ -141,7 +149,8 @@ def quantize_affine( zero_point: Optional[torch.Tensor], output_dtype: torch.dtype, quant_min: 
Optional[int] = None, - quant_max: Optional[int] = None + quant_max: Optional[int] = None, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, ): """ Args: @@ -153,6 +162,12 @@ def quantize_affine( output_dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor quant_min (Optional[int]): minimum quantized value for output Tensor, if not specified, it will be derived from dtype quant_max (Optional[int]): maximum quantized value for output Tensor, if not specified, it will be derived from dtype + zero_point_domain (ZeroPointDomain): the domain that zero_point is in, should be eitehr integer or float + if zero_point is in integer domain, zero point is added to the quantized integer value during + quantization + if zero_point is in floating point domain, zero point is subtracted from the floating point (unquantized) + value during quantization + default is ZeroPointDomain.INT Note: How can block_size represent different granularities? @@ -184,9 +199,19 @@ def quantize_affine( if zero_point is not None: zero_point = zero_point.view(shape_after_reduction) - quant = torch.clamp( - torch.round(input / scale) + zero_point, quant_min, quant_max - ).to(output_dtype) + if zero_point_domain == ZeroPointDomain.INT: + quant = torch.clamp( + torch.round(input / scale) + zero_point, quant_min, quant_max + ).to(output_dtype) + else: + assert zero_point_domain == ZeroPointDomain.FLOAT + mid_point = (quant_max + quant_min + 1) / 2 + min_val = zero_point - scale * mid_point + quant = ( + torch.clamp( + torch.round((input - min_val) / scale), + quant_min, quant_max) + ).to(output_dtype) quant = quant.view(original_shape) return quant @@ -199,6 +224,7 @@ def dequantize_affine( input_dtype: torch.dtype, quant_min: Optional[int] = None, quant_max: Optional[int] = None, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, *, output_dtype: torch.dtype = torch.float32, ): @@ -213,6 +239,12 @@ def dequantize_affine( quant_min (Optional[int]): minimum quantized value for input Tensor quant_max (Optional[int]): maximum quantized value for input Tensor output_dtype (torch.dtype): dtype for output Tensor, default is fp32 + zero_point_domain (ZeroPointDomain): the domain that zero_point is in, should be eitehr integer or float + if zero_point is in integer domain, zero point is added to the quantized integer value during + quantization + if zero_point is in floating point domain, zero point is subtracted from the floating point (unquantized) + value during quantization + default is ZeroPointDomain.INT Output: dequantized Tensor, with requested dtype or fp32 @@ -233,18 +265,22 @@ def dequantize_affine( if zero_point is not None: zero_point = zero_point.view(shape_after_reduction) - dequant = input.to(torch.int32) - if zero_point is not None: - dequant -= zero_point.to(torch.int32) - dequant = dequant.to(output_dtype) - dequant *= scale - dequant = dequant.view(original_shape) - return dequant.to(output_dtype) + if zero_point_domain == ZeroPointDomain.INT: + dequant = input.to(torch.int32) + if zero_point is not None: + dequant -= zero_point.to(torch.int32) + dequant = dequant.to(output_dtype) + dequant *= scale + else: + assert zero_point_domain == ZeroPointDomain.FLOAT, f"Unexpected zero point domain: {zero_point_domain}" + mid_point = (quant_max + quant_min + 1) / 2 + dequant = input - mid_point + dequant = dequant.to(output_dtype) + dequant *= scale + if zero_point is not None: + dequant += zero_point - -class MappingType(Enum): - SYMMETRIC = 0 - ASYMMETRIC = 1 + return 
dequant.view(original_shape).to(output_dtype) def choose_qparams_affine( input: torch.Tensor, @@ -256,7 +292,8 @@ def choose_qparams_affine( eps: Optional[float] = None, scale_dtype: Optional[torch.dtype] = None, zero_point_dtype: Optional[torch.dtype] = None, - preserve_zero = True, + preserve_zero: bool = True, + zero_point_domain = ZeroPointDomain.INT, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: @@ -280,6 +317,13 @@ def choose_qparams_affine( If we don't need zero to be exactly representable, we won't do rounding and clamping for zero_point + zero_point_domain (ZeroPointDomain): the domain that zero_point is in, should be eitehr integer or float + if zero_point is in integer domain, zero point is added to the quantized integer value during + quantization + if zero_point is in floating point domain, zero point is subtracted from the floating point (unquantized) + value during quantization + default is ZeroPointDomain.INT + Output: Tuple of scales and zero_points Tensor with requested dtype """ @@ -310,15 +354,18 @@ def choose_qparams_affine( scale = max_val_pos / (float(quant_max - quant_min) / 2) if not preserve_zero: raise ValueError("preserve_zero == False is not supported for symmetric quantization") - zero_point = torch.full_like(scale, int((quant_min + quant_max + 1) / 2)) + if zero_point_domain != ZeroPointDomain.INT: + raise ValueError("zero_point_domain != ZeroPointDomain.INT is not supported for symmetric quantization") + zero_point = torch.full_like(scale, int((quant_max + quant_min + 1) / 2)) else: scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min) if preserve_zero: zero_point = quant_min - torch.round(min_val_neg / scale) zero_point = torch.clamp(zero_point, quant_min, quant_max) else: - zero_point = quant_min - min_val_neg / scale - + assert zero_point_domain == ZeroPointDomain.FLOAT, "if not preserve_zero, zero_point must be in FLOAT domain" + mid_point = (quant_max + quant_min + 1) / 2 + zero_point = min_val_neg + scale * mid_point if eps is None: eps = torch.finfo(input.dtype).eps diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index 6128720d4d..607cb77766 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -14,10 +14,13 @@ dynamically_quantize_per_channel, groupwise_affine_quantize_tensor, quant_int8_dynamic_per_token_linear, + pack_tinygemm_scales_and_zeros, unpack_tinygemm_scales_and_zeros, + groupwise_affine_quantize_tensor_from_qparams, choose_qparams_affine, quantize_affine, dequantize_affine, + ZeroPointDomain, ) from .utils import find_multiple from typing import Tuple, Optional, Callable @@ -619,7 +622,13 @@ class AffineQuantizedTensor(torch.Tensor): shape (torch.Size): the shape for the Tensor quant_min (Optional[int]): minimum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data` quant_max (Optional[int]): maximum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data` - input_quant_func (Optional[Callable]): function for quantizing the input float Tensor to a quantized tensor subclass object, that takes input Tensor as input and outputs an AffineQuantizedTensor object + zero_point_domain (ZeroPointDomain): the domain that zero_point is in, should be eitehr integer or float + if zero_point is in integer domain, zero point is added to the quantized integer value during + quantization + if zero_point is in floating point domain, zero point is subtracted from the floating point (unquantized) + 
value during quantization + default is ZeroPointDomain.INT + input_quant_func (Optional[Callable]): function for quantizing the input float Tensor to a quantized tensor subclass object, that takes float Tensor as input and outputs an AffineQuantizedTensor object dtype: dtype for external representation of the tensor, e.g. torch.float32 """ @@ -633,8 +642,10 @@ def __new__( shape: torch.Size, quant_min: Optional[int] = None, quant_max: Optional[int] = None, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, input_quant_func: Optional[Callable] = None, dtype=None, + # TODO: remove args and kwargs *args, **kwargs ): @@ -658,6 +669,7 @@ def __init__( shape: torch.Size, quant_min: Optional[int] = None, quant_max: Optional[int] = None, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, input_quant_func: Optional[Callable] = None, dtype=None, *args, @@ -669,6 +681,7 @@ def __init__( self.block_size = block_size self.quant_min = quant_min self.quant_max = quant_max + self.zero_point_domain = zero_point_domain self.input_quant_func = input_quant_func def __repr__(self): @@ -677,18 +690,20 @@ def __repr__(self): f"device={self.device}, dtype={self.dtype}, input_quant_func={self.input_quant_func}, requires_grad={self.requires_grad})" ) - def dequantize(self, output_dtype=torch.float32): - return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, output_dtype=output_dtype) + def dequantize(self, output_dtype=None): + if output_dtype is None: + output_dtype = self.dtype + return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, self.zero_point_domain, output_dtype=output_dtype) def __tensor_flatten__(self): - return ["int_data", "scales", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.input_quant_func, self.dtype] + return ["int_data", "scales", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.input_quant_func, self.dtype] @classmethod def __tensor_unflatten__( cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride ): int_data, scale, zero_point = tensor_data_dict["int_data"], tensor_data_dict["scale"], tensor_data_dict["zero_point"] - block_size, shape, quant_min, quant_max, input_quant_func, dtype = tensor_attributes + block_size, shape, quant_min, quant_max, zero_point_domain, input_quant_func, dtype = tensor_attributes return cls( int_data, scale, @@ -697,6 +712,7 @@ def __tensor_unflatten__( shape if outer_size is None else outer_size, quant_min, quant_max, + zero_point_domain, input_quant_func=input_quant_func, dtype=dtype, strides=outer_stride, @@ -715,9 +731,11 @@ def from_float( scale_dtype = None, zero_point_dtype = None, input_quant_func = None, + preserve_zero = True, + zero_point_domain = ZeroPointDomain.INT, ): - scale, zero_point = choose_qparams_affine(input_float, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, scale_dtype, zero_point_dtype) - int_data = quantize_affine(input_float, block_size, scale, zero_point, target_dtype, quant_min, quant_max) + scale, zero_point = choose_qparams_affine(input_float, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, scale_dtype, zero_point_dtype, preserve_zero, zero_point_domain) + int_data = quantize_affine(input_float, block_size, scale, zero_point, target_dtype, quant_min, quant_max, zero_point_domain) return cls( int_data, scale, 
@@ -726,6 +744,7 @@ def from_float( input_float.shape, quant_min, quant_max, + zero_point_domain, input_quant_func=input_quant_func, dtype=input_float.dtype ) @@ -740,7 +759,54 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): args[1], args[2] if len(args) > 2 else None, ) - if weight_qtensor.input_quant_func is not None: + if weight_qtensor.input_quant_func is None: + is_cuda = args[0].is_cuda + is_cpu = args[0].device == torch.device("cpu") + # weight only quantization + is_int8 = ( + weight_qtensor.int_data.dtype == torch.int8 and + weight_qtensor.quant_min is None or weight_qtensor.quant_min == -128 and + weight_qtensor.quant_max is None or weight_qtensor.quant_max == 127 + ) + is_uint4 = ( + weight_qtensor.int_data.dtype == torch.int32 and + weight_qtensor.quant_min == 0 and + weight_qtensor.quant_max == 15 + ) + + # TODO: enable cpu and mps path as well + # TODO: make sure weight dimension matches the expectation of the int4mm kernel + # TODO: move this to TinygemmAffineQuantizedTensor + if ( + is_cuda and + is_uint4 and + weight_qtensor.dtype == torch.bfloat16 and + len(weight_qtensor.shape) == 2 and + weight_qtensor.block_size[0] == 1 and + weight_qtensor.zero_point_domain == ZeroPointDomain.FLOAT + ): + # groupwise int4 quantization + # TODO: currently doing packing on the fly, we'll need to figure out + # the API to do packing before hand + # TODO: expose the arg + innerKTiles = 8 + packed_weight = torch.ops.aten._convert_weight_to_int4pack(weight_qtensor.int_data.to(torch.int32), innerKTiles) + scales_and_zeros = pack_tinygemm_scales_and_zeros(weight_qtensor.scale, weight_qtensor.zero_point) + groupsize = weight_qtensor.block_size[-1] + return torch.ops.aten._weight_int4pack_mm(input_tensor.contiguous(), packed_weight, groupsize, scales_and_zeros) + elif ( + is_cpu and + is_int8 and + len(weight_qtensor.shape) == 2 and + len(weight_qtensor.block_size) == 2 and + weight_qtensor.block_size[0] == 1 and + weight_qtensor.block_size[1] == weight_qtensor.shape[1] + ): + # TODO: enable mps path as well + # per channel int8 weight only quantizated mm + return torch.ops.aten._weight_int8pack_mm(input_tensor.contiguous(), weight_qtensor.int_data, weight_qtensor.scale) + else: + # dynamic quantization input_tensor = weight_qtensor.input_quant_func(input_tensor) input_tensor = input_tensor.dequantize() weight_tensor = weight_qtensor.dequantize() @@ -777,6 +843,7 @@ def to(self, *args, **kwargs): self.shape, self.quant_min, self.quant_max, + self.zero_point_domain, self.input_quant_func, **kwargs, ) @@ -790,6 +857,7 @@ def _apply_fn_to_data(self, fn): self.shape, self.quant_min, self.quant_max, + self.zero_point_domain, self.input_quant_func, dtype=self.dtype, ) From 3dd16c95698604b3841bb463b81dde80aa5928b6 Mon Sep 17 00:00:00 2001 From: andrewor14 Date: Wed, 15 May 2024 17:32:08 -0400 Subject: [PATCH 34/61] Fix CI after quantize op change in PyTorch core (#244) Summary: https://github.com/pytorch/pytorch/pull/125781 recently changed the numerics of the quantize op subtly. This commit fixes the numerics mismatch caused by this PR by making our quantize ops consistent with the ones in core. 
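The change is subtle because x / s and x * (1.0 / s) are not guaranteed to produce
bit-identical floats, so inputs landing exactly on a rounding boundary can quantize to
adjacent integers. A self-contained illustration (not part of the patch):

    import torch

    torch.manual_seed(0)
    x = torch.randn(100_000)
    s = torch.rand(100_000) + 0.5

    q_div = torch.round(x / s)           # previous torchao behavior
    q_mul = torch.round(x * (1.0 / s))   # behavior of the quantize op in core
    # The two formulations can differ in the last ulp, which can flip values sitting
    # on the .5 rounding boundary by one quantization step.
    print((q_div != q_mul).sum().item(), "of", x.numel(), "elements differ")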
Test Plan: python test/quantization/test_quant_primitives.py -k test_quantize_dequantize_group_sym python test/quantization/test_quant_api.py TestQuantFlow.test_quantized_tensor_subclass_8da4w Reviewers: jerryzh168, cpuhrsch Subscribers: jerryzh168, cpuhrsch, supriyar --- test/integration/test_integration.py | 2 +- test/quantization/test_qat.py | 16 ++++++++-------- test/quantization/test_quant_primitives.py | 2 +- torchao/quantization/prototype/qat.py | 2 +- torchao/quantization/quant_primitives.py | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index e6da3e7340..d701177016 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -1124,7 +1124,7 @@ def test_weight_only_quant_force_mixed_mm(self, device, dtype): m_c = torch.compile(m, mode="max-autotune") y_wo, (code,) = run_and_get_code(m_c, x) sqnr = compute_error(y_ref, y_wo) - self.assertGreater(sqnr, 43.0) + self.assertGreaterEqual(sqnr, 42.75) if device == "cuda": self.assertTrue("mixed_mm" in code) diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py index a0587d3ff0..fe2db8066a 100644 --- a/test/quantization/test_qat.py +++ b/test/quantization/test_qat.py @@ -18,7 +18,7 @@ fake_quantize_per_token, ) from torchao.quantization.quant_primitives import get_group_qparams_symmetric -from torchao.quantization.utils import TORCH_VERSION_AFTER_2_3 +from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 # TODO: put this in a common test utils file @@ -58,7 +58,7 @@ def _get_qmin_qmax(self, n_bit: int): qmax = 2 ** (n_bit - 1) - 1 return (qmin, qmax) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_fake_quantize_per_channel_group(self): n_bit = 4 (qmin, qmax) = self._get_qmin_qmax(n_bit) @@ -84,7 +84,7 @@ def test_fake_quantize_per_channel_group(self): ) torch.testing.assert_close(out, out_ptq, atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_fake_quantize_per_token(self): (qmin, qmax) = self._get_qmin_qmax(8) @@ -130,7 +130,7 @@ def _set_ptq_weight( ptq_linear.scales = s ptq_linear.zeros = zp - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_qat_8da4w_linear(self): from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATLinear from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear @@ -155,7 +155,7 @@ def test_qat_8da4w_linear(self): ptq_out = ptq_linear(x2) torch.testing.assert_close(ptq_out, qat_out, atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_qat_8da4w_quantizer(self): from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATQuantizer from torchao.quantization.GPTQ import Int8DynActInt4WeightQuantizer @@ -189,7 +189,7 @@ def test_qat_8da4w_quantizer(self): for k in ptq_state_dict.keys(): torch.testing.assert_close(ptq_state_dict[k], converted_state_dict[k], atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, 
"skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_qat_8da4w_quantizer_meta_weights(self): from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATQuantizer @@ -201,7 +201,7 @@ def test_qat_8da4w_quantizer_meta_weights(self): qat_model = qat_quantizer.prepare(m) self.assertTrue(all(v.is_meta for v in qat_model.state_dict().values())) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_qat_8da4w_quantizer_disable_fake_quant(self): """ Test that 8da4w QAT with disabled fake quant matches nn.Linear in forward. @@ -254,7 +254,7 @@ def test_qat_8da4w_quantizer_disable_fake_quant(self): qat_out2 = qat_model2(*x2) torch.testing.assert_close(qat_out, qat_out2, atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_qat_8da4w_quantizer_disable_fake_quant_backward(self): """ Test that 8da4w QAT with disabled fake quant matches nn.Linear in backward. diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py index a64439a25e..0fb48d761b 100644 --- a/test/quantization/test_quant_primitives.py +++ b/test/quantization/test_quant_primitives.py @@ -156,7 +156,7 @@ def test_quantize_activation_per_token_abs_max_zero_input(self): quantized_ref, scale_ref = quantize_activation_per_token_absmax(input) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "skipping when torch verion is 2.3 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_quantize_dequantize_group_sym(self): input = torch.randn(10, 10) mapping_type = MappingType.SYMMETRIC diff --git a/torchao/quantization/prototype/qat.py b/torchao/quantization/prototype/qat.py index d15e841d74..314543bb8e 100644 --- a/torchao/quantization/prototype/qat.py +++ b/torchao/quantization/prototype/qat.py @@ -209,7 +209,7 @@ def forward(ctx, input, scales, zero_points, quant_min, quant_max): # which rounds first before adding the zero points. However, this # is what `quantize_per_channel_group` and `quantize_per_token` # do and here we try to match that behavior as closely as possible. - q = input.div(scales).add(zero_points).round() + q = input.mul(1.0 / scales).add(zero_points).round() dq = q.clamp(quant_min, quant_max).sub(zero_points).mul(scales) # TODO: do we need this mask? 
mask = torch.logical_and((q >= quant_min), (q <= quant_max)) diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index 4f39a6055d..30c6854480 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -201,7 +201,7 @@ def quantize_affine( if zero_point_domain == ZeroPointDomain.INT: quant = torch.clamp( - torch.round(input / scale) + zero_point, quant_min, quant_max + torch.round(input * (1.0 / scale)) + zero_point, quant_min, quant_max ).to(output_dtype) else: assert zero_point_domain == ZeroPointDomain.FLOAT From cae3d823cec4eb9ad781d9e589f1487e79c9286f Mon Sep 17 00:00:00 2001 From: andrewor14 Date: Wed, 15 May 2024 19:17:31 -0400 Subject: [PATCH 35/61] Match torch.fake_quantize numerics in 8da4w QAT (#229) Summary: There are two subtle differences between the 8da4w quant primitives and `torch.fake_quantize_per_channel_affine` today: 1. 8da4w uses float32 zero points torch.fake_quantize uses int32 zero points 2. 8da4w uses input.div(scales) torch.fake_quantize uses input.mul(1.0 / scales) Of these two differences, the second one is smaller and only resulted in 0.1% elements mismatched in unit tests, but it is a source of numerical divergence nonetheless. This commit changes 8da4w QAT quant primitives to match the torch.fake_quantize behavior for both of these differences. In a future commit, we will change the 8da4w PTQ quant primitives as well so PTQ and QAT remain consistent. Note: This commit also has the side effect of reducing memory footprint significantly for bf16 inputs. We now cast them to fp32 before multiplying them with fp32 scales. This reduced memory usage presumably because bf16 * fp32 kernels are not as memory efficient. Test Plan: python test/quantization/test_qat.py -k test_qat_generic_fake_quantize Reviewers: jerryzh168, cpuhrsch Subscribers: jerryzh168, cpuhrsch, supriyar --- test/quantization/test_qat.py | 45 +++++++++++++---- torchao/quantization/prototype/qat.py | 64 ++++++++++++++++-------- torchao/quantization/quant_primitives.py | 2 +- 3 files changed, 78 insertions(+), 33 deletions(-) diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py index fe2db8066a..93323df0f1 100644 --- a/test/quantization/test_qat.py +++ b/test/quantization/test_qat.py @@ -14,6 +14,7 @@ from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib # noqa: F401 from torchao.quantization.prototype.qat import ( _choose_qparams_per_token_asymmetric, + _GenericFakeQuantize, fake_quantize_per_channel_group, fake_quantize_per_token, ) @@ -58,7 +59,7 @@ def _get_qmin_qmax(self, n_bit: int): qmax = 2 ** (n_bit - 1) - 1 return (qmin, qmax) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") def test_fake_quantize_per_channel_group(self): n_bit = 4 (qmin, qmax) = self._get_qmin_qmax(n_bit) @@ -67,6 +68,7 @@ def test_fake_quantize_per_channel_group(self): torch.manual_seed(self.SEED) x = torch.randn(100, 256).requires_grad_() (s, zp) = get_group_qparams_symmetric(x, n_bit, group_size) + zp = zp.to(torch.int32) x2 = copy.deepcopy(x) # fake quant op @@ -84,7 +86,7 @@ def test_fake_quantize_per_channel_group(self): ) torch.testing.assert_close(out, out_ptq, atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 
2.4 or lower") def test_fake_quantize_per_token(self): (qmin, qmax) = self._get_qmin_qmax(8) @@ -92,10 +94,7 @@ def test_fake_quantize_per_token(self): x = torch.randn(100, 256).requires_grad_() x2 = copy.deepcopy(x) # TODO: use torch.ops.aten.quantized_decomposed version instead - (s, zp) = _choose_qparams_per_token_asymmetric( - x, - torch.int8, # not used - ) + (s, zp) = _choose_qparams_per_token_asymmetric(x, torch.float32, torch.int32) # fake quant op out = fake_quantize_per_token(x, s, zp, qmin, qmax) @@ -130,7 +129,7 @@ def _set_ptq_weight( ptq_linear.scales = s ptq_linear.zeros = zp - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") def test_qat_8da4w_linear(self): from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATLinear from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear @@ -155,7 +154,7 @@ def test_qat_8da4w_linear(self): ptq_out = ptq_linear(x2) torch.testing.assert_close(ptq_out, qat_out, atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") def test_qat_8da4w_quantizer(self): from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATQuantizer from torchao.quantization.GPTQ import Int8DynActInt4WeightQuantizer @@ -189,7 +188,7 @@ def test_qat_8da4w_quantizer(self): for k in ptq_state_dict.keys(): torch.testing.assert_close(ptq_state_dict[k], converted_state_dict[k], atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") def test_qat_8da4w_quantizer_meta_weights(self): from torchao.quantization.prototype.qat import Int8DynActInt4WeightQATQuantizer @@ -201,7 +200,7 @@ def test_qat_8da4w_quantizer_meta_weights(self): qat_model = qat_quantizer.prepare(m) self.assertTrue(all(v.is_meta for v in qat_model.state_dict().values())) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") def test_qat_8da4w_quantizer_disable_fake_quant(self): """ Test that 8da4w QAT with disabled fake quant matches nn.Linear in forward. @@ -254,7 +253,7 @@ def test_qat_8da4w_quantizer_disable_fake_quant(self): qat_out2 = qat_model2(*x2) torch.testing.assert_close(qat_out, qat_out2, atol=0, rtol=0) - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") def test_qat_8da4w_quantizer_disable_fake_quant_backward(self): """ Test that 8da4w QAT with disabled fake quant matches nn.Linear in backward. 
@@ -299,6 +298,30 @@ def test_qat_8da4w_quantizer_disable_fake_quant_backward(self): torch.testing.assert_close(nn_model.linear2.weight, qat_model.linear2.weight, atol=0, rtol=0) torch.testing.assert_close(nn_model.sub.linear.weight, qat_model.sub.linear.weight, atol=0, rtol=0) + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch version is 2.4 or lower") + def test_qat_generic_fake_quantize(self): + """ + Test that the generic fake quantize used in 8da4w QAT matches + the numerics of existing fake quantize ops in Pytorch in both + the forward and the backward passes. + """ + (qmin, qmax) = self._get_qmin_qmax(4) + py_input = torch.randn(16, 64).float().requires_grad_() + py_s = torch.randn(16).float() + py_zp = torch.randint(qmax, size=(16,), dtype=torch.int32) + py_out = torch.fake_quantize_per_channel_affine(py_input, py_s, py_zp, 0, qmin, qmax) + py_out.sum().backward() + + ao_input = copy.deepcopy(py_input) + ao_input.grad.data.zero_() + ao_s = copy.deepcopy(py_s).reshape(-1, 1) + ao_zp = copy.deepcopy(py_zp).reshape(-1, 1) + ao_out = _GenericFakeQuantize.apply(ao_input, ao_s, ao_zp, qmin, qmax) + ao_out.sum().backward() + + torch.testing.assert_close(py_out, ao_out, atol=0, rtol=0) + torch.testing.assert_close(py_input.grad, ao_input.grad, atol=0, rtol=0) + if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/prototype/qat.py b/torchao/quantization/prototype/qat.py index 314543bb8e..6cda8eeee0 100644 --- a/torchao/quantization/prototype/qat.py +++ b/torchao/quantization/prototype/qat.py @@ -4,18 +4,18 @@ # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. -from typing import Any, Optional, Tuple +from typing import Any, Tuple import torch from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib from torch.library import impl -from torchao.quantization.utils import TORCH_VERSION_AFTER_2_3 +from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 from torchao.quantization.quant_primitives import get_group_qparams_symmetric from torchao.quantization.unified import TwoStepQuantizer -if TORCH_VERSION_AFTER_2_3: +if TORCH_VERSION_AFTER_2_4: from torchao.quantization.GPTQ import ( _replace_linear_8da4w, Int8DynActInt4WeightLinear, @@ -54,7 +54,7 @@ def prepare( self.precision, self.scales_precision, Int8DynActInt4WeightQATLinear, - copy_weights = True, + copy_weights=True, ) return model @@ -95,7 +95,7 @@ def _convert_qat_linear_8da4w(module: torch.nn.Module): quantized_linear.zeros = zp else: _convert_qat_linear_8da4w(child) - + class Int8DynActInt4WeightQATLinear(torch.nn.Linear): """ This module implements a linear layer with int8 dynamic per token fake @@ -131,6 +131,8 @@ def __init__( self.groupsize = groupsize self.precision = precision self.scales_precision = scales_precision + # TODO: make this configurable? 
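#         (sketch, not part of this patch) One way the TODO above could be
#         addressed: thread the dtype through the constructor the same way
#         `scales_precision` already is, e.g. with a hypothetical signature
#
#             def __init__(self, ..., zero_points_precision: torch.dtype = torch.int32):
#                 self.zero_points_precision = zero_points_precision
#
#         For now it is hardcoded to int32 to match torch.fake_quantize: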
+ self.zero_points_precision = torch.int32 self._fake_quant_enabled = True def enable_fake_quant(self, enabled: bool = True): @@ -142,8 +144,8 @@ def disable_fake_quant(self): def forward(self, x: torch.Tensor) -> torch.Tensor: # activations: int8 dynamic asymmetric quant if self._fake_quant_enabled: - (act_scales, act_zp) =_choose_qparams_per_token_asymmetric( - x, torch.int8, # dtype not used + (act_scales, act_zp) = _choose_qparams_per_token_asymmetric( + x, self.scales_precision, self.zero_points_precision, ) (act_qmin, act_qmax) = self._get_qmin_qmax(8) x_fq = fake_quantize_per_token( @@ -157,6 +159,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: (weight_scales, weight_zp) = get_group_qparams_symmetric( self.weight, 4, self.groupsize, self.scales_precision, ) + # TODO: pass zp dtype to `get_group_qparams_symmetric` instead + weight_zp = weight_zp.to(self.zero_points_precision) (weight_qmin, weight_qmax) = self._get_qmin_qmax(4) w_fq = fake_quantize_per_channel_group( self.weight, @@ -190,6 +194,20 @@ def disable_8da4w_fake_quant(mod: torch.nn.Module): if isinstance(mod, Int8DynActInt4WeightQATLinear): mod.disable_fake_quant() +else: # not TORCH_VERSION_AFTER_2_4 + + class Int8DynActInt4WeightQATQuantizer: + def __init__(*args, **kwargs): + raise ValueError( + "Int8DynActInt4WeightQATQuantizer is only supported after PyTorch 2.4+" + ) + + class Int8DynActInt4WeightQATLinear: + def __init__(*args, **kwargs): + raise ValueError( + "Int8DynActInt4WeightQATLinear is only supported after PyTorch 2.4+" + ) + # ======================== # | QUANT PRIMITIVES | @@ -205,13 +223,14 @@ class _GenericFakeQuantize(torch.autograd.Function): @staticmethod def forward(ctx, input, scales, zero_points, quant_min, quant_max): - # Note: this diverges from `torch.fake_quantize_per_channel_affine`, - # which rounds first before adding the zero points. However, this - # is what `quantize_per_channel_group` and `quantize_per_token` - # do and here we try to match that behavior as closely as possible. - q = input.mul(1.0 / scales).add(zero_points).round() + # Note: for bf16 inputs, casting them to fp32 has the unexpected + # side effect of reducing memory footprint significantly, presumably + # because bf16 * fp32 kernels are not as memory efficient + assert input.dtype == torch.float32 + assert scales.dtype == torch.float32 + assert zero_points.dtype == torch.int32 + q = input.mul(1.0 / scales).round().add(zero_points) dq = q.clamp(quant_min, quant_max).sub(zero_points).mul(scales) - # TODO: do we need this mask? 
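# The mask computed just below drives a straight-through estimator in
# backward: gradients pass only where the pre-clamp value fell inside
# [quant_min, quant_max], mirroring torch.fake_quantize_per_channel_affine.
# A minimal sketch (assumed, not part of this patch) of the matching
# backward half, given the mask saved in forward:
#
#     @staticmethod
#     def backward(ctx, gy):
#         (mask,) = ctx.saved_tensors
#         # zero the gradient wherever forward clamped the value;
#         # one grad per forward input: input, scales, zero_points, qmin, qmax
#         return gy * mask, None, None, None, None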
mask = torch.logical_and((q >= quant_min), (q <= quant_max))
         ctx.save_for_backward(mask)
         return dq
@@ -239,14 +258,13 @@ def fake_quantize_per_channel_group(
     assert group_size > 1
     assert input.shape[-1] % group_size == 0
     assert input.dim() == 2
-    assert torch.isnan(input).sum() == 0
-    grouped_input = input.reshape(-1, group_size)
+    grouped_input = input.reshape(-1, group_size).to(torch.float32)
     scales = scales.reshape(-1, 1)
     zero_points = zero_points.reshape(-1, 1)
     fq = _GenericFakeQuantize.apply(
         grouped_input, scales, zero_points, quant_min, quant_max,
     )
-    return fq.reshape_as(input)
+    return fq.reshape_as(input).to(input.dtype)

 # TODO: move this to core
 quantized_decomposed_lib.define(
@@ -266,9 +284,11 @@ def fake_quantize_per_token(
     from torch.ao.quantization.fx._decomposed import _per_token_quant_qparam_dim_check

     _per_token_quant_qparam_dim_check(input, scales, zero_points)
-    return _GenericFakeQuantize.apply(
-        input, scales, zero_points, quant_min, quant_max,
+    fq_input = input.to(torch.float32)
+    fq = _GenericFakeQuantize.apply(
+        fq_input, scales, zero_points, quant_min, quant_max,
     )
+    return fq.reshape_as(input).to(input.dtype)

 # TODO: This is copied from torch/ao/quantization/fx/_decomposed.py.
 # The version in pytorch does not have backward support yet so we add
 # the backward support in _GenericFakeQuantize, the behavior is the same.
 # TODO: switch back to the pytorch version once its backward support
 # is landed.
 def _choose_qparams_per_token_asymmetric(
     input: torch.Tensor,
-    dtype: torch.dtype,
+    scales_precision: torch.dtype = torch.float32,
+    zero_points_precision: torch.dtype = torch.float32,
 ) -> Tuple[torch.Tensor, torch.Tensor]:
     """Choose quantization parameters for per token quantization. This means for a N dimension Tensor
     (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
     Args:
       input (torch.Tensor): original float32/float16 Tensor
       dtype (torch.dtype): dtype (e.g.
torch.uint8) for input Tensor + scales_precision (torch.dtype): precision of returned scales + zero_points_precision (torch.dtype): precision of returned zero points Returns: scales and zero_points, both float32 Tensors @@ -314,4 +336,4 @@ def _choose_qparams_per_token_asymmetric( ) zero_point = torch.clamp(zero_point, qmin, qmax).round() - return scale.to(torch.float32), zero_point.to(torch.float32) + return scale.to(scales_precision), zero_point.to(zero_points_precision) diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index 30c6854480..e1de871e24 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -764,7 +764,7 @@ def groupwise_affine_dequantize_tensor( ) -# TODO: replace this with torch.ao.quantization.PerChannelMinMaxObserver +# TODO: separate scale and zero point precision def get_group_qparams_symmetric(w, n_bit=4, groupsize=128, precision=torch.float32): # needed for GPTQ with padding if groupsize > w.shape[-1]: From cda787ccc79664ccdc0ec10cf6ba06e87b9a079c Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Wed, 15 May 2024 17:45:51 -0700 Subject: [PATCH 36/61] Remove input_quant_func from AffineQuantizedTensor subclass (#243) * Remove input_quant_func from AffineQuantizedTensor subclass Summary: Currently we have a input_quant_func in the AffineQuantizedTensor, which is a bit convoluted, we want to use a separate LinearActAffineQuantizedTensor subclass for activation quantization (dynamic quantization) instead Test Plan: python test/quantization/test_quant_api.py -k test_quantized_tensor_subclass_8da4w Reviewers: Subscribers: Tasks: Tags: * Add dispatch for dynamic quantization in `AffineQuantizedTensor` Summary: This PR added dispatch for int8act-int8 weight dynamic quantization that's calling `int_scaled_matmul` kernel in the end Test Plan: python test/quantization/test_quant_api.py -k test_quantized_tensor_subclass_int8_dyn_quant Reviewers: Subscribers: Tasks: Tags: * Fix test --- test/quantization/test_quant_api.py | 86 ++++++++-- torchao/quantization/subclass.py | 257 +++++++++++++++++++++++----- 2 files changed, 286 insertions(+), 57 deletions(-) diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index cea659e61d..fcab07c913 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -395,7 +395,10 @@ def test_eval_wrapper(self): # TODO: move to a separate test file @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") def test_quantized_tensor_subclass_8da4w(self): - from torchao.quantization.subclass import AffineQuantizedTensor + from torchao.quantization.subclass import ( + AffineQuantizedTensor, + LinearActQuantizedTensor, + ) from torchao.quantization.quant_primitives import MappingType import copy @@ -409,6 +412,7 @@ def test_quantized_tensor_subclass_8da4w(self): quant_max = 7 # TODO: make a general helper function? 
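# A hedged sketch (hypothetical helper, not part of this patch) of what the
# TODO above might look like: per-token quantization uses a block size of 1
# along every dimension except the last, which spans the whole row.
#
#     def per_token_block_size(x: torch.Tensor) -> list:
#         return [1] * (x.dim() - 1) + [x.shape[-1]]
#
# e.g. a (2, 3, 1024) activation yields [1, 1, 1024], i.e. one scale and
# one zero point per token, matching the inline helper defined next.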
+ # input settings def get_per_token_block_size(x): block_size = [] for i in range(len(x.shape)-1): @@ -421,13 +425,18 @@ def get_per_token_block_size(x): input_target_dtype = torch.int8 input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) + def dynamic_quant(linear): + # note: order is important + linear.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(linear.weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps), requires_grad=False) + linear.weight = torch.nn.Parameter(LinearActQuantizedTensor.from_float(linear.weight, input_quant_func), requires_grad=False) + m = ToyLinearModel().eval() m_copy = copy.deepcopy(m) example_inputs = m.example_inputs() - m.linear1.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(m.linear1.weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, input_quant_func=input_quant_func), requires_grad=False) - m.linear2.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(m.linear2.weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, input_quant_func=input_quant_func), requires_grad=False) - assert isinstance(m.linear1.weight, AffineQuantizedTensor) - assert isinstance(m.linear2.weight, AffineQuantizedTensor) + dynamic_quant(m.linear1) + dynamic_quant(m.linear2) + assert isinstance(m.linear1.weight, LinearActQuantizedTensor) + assert isinstance(m.linear2.weight, LinearActQuantizedTensor) # reference from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer @@ -461,9 +470,6 @@ def test_quantized_tensor_subclass_int4(self): preserve_zero = False zero_point_dtype = torch.bfloat16 - # weight only quantization - input_quant_func = None - # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") m_copy = copy.deepcopy(m) @@ -475,7 +481,6 @@ def to_quantized(weight): zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=ZeroPointDomain.FLOAT, - input_quant_func=input_quant_func, ) m.linear1.weight = torch.nn.Parameter(to_quantized(m.linear1.weight), requires_grad=False) @@ -506,16 +511,13 @@ def test_quantized_tensor_subclass_int8(self): eps = torch.finfo(torch.float32).eps zero_point_dtype = torch.int64 - # weight only quantization - input_quant_func = None - m = ToyLinearModel().eval().to(torch.bfloat16) m_copy = copy.deepcopy(m) example_inputs = tuple(map(lambda x: x.to(torch.bfloat16), m.example_inputs())) def to_quantized(weight): block_size = (1, weight.shape[1]) - return AffineQuantizedTensor.from_float(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype, input_quant_func=input_quant_func) + return AffineQuantizedTensor.from_float(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) m.linear1.weight = torch.nn.Parameter(to_quantized(m.linear1.weight), requires_grad=False) m.linear2.weight = torch.nn.Parameter(to_quantized(m.linear2.weight), requires_grad=False) @@ -532,5 +534,63 @@ def to_quantized(weight): torch.testing.assert_close(res, ref, rtol=0.00001, atol=1e-2) + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") + @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + def test_quantized_tensor_subclass_int8_dyn_quant(self): + from torchao.quantization.subclass import AffineQuantizedTensor + from torchao.quantization.subclass import LinearActQuantizedTensor + from 
torchao.quantization.quant_primitives import MappingType + from torchao.quantization.quant_primitives import ZeroPointDomain + import copy + + # weight settings + mapping_type = MappingType.SYMMETRIC + def get_weight_block_size(x): + return (1, x.shape[1]) + target_dtype = torch.int8 + eps = torch.finfo(torch.float32).eps + zero_point_dtype = torch.int64 + + # input settings + def get_per_token_block_size(x): + block_size = list(x.shape) + for i in range(len(block_size)-1): + block_size[i] = 1 + return block_size + + input_mapping_type = MappingType.SYMMETRIC + input_target_dtype = torch.int8 + input_eps = 1e-5 + input_quant_min = -127 + input_quant_max = 127 + input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float) + + # use 1024 so that we don't need padding + m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") + m_copy = copy.deepcopy(m) + example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs())) + + def dynamic_quant(linear): + # note: order is important + linear.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(linear.weight, mapping_type, get_weight_block_size(linear.weight), target_dtype, eps=eps, zero_point_dtype=zero_point_dtype), requires_grad=False) + linear.weight = torch.nn.Parameter(LinearActQuantizedTensor.from_float(linear.weight, input_quant_func), requires_grad=False) + + dynamic_quant(m.linear1) + dynamic_quant(m.linear2) + assert isinstance(m.linear1.weight, LinearActQuantizedTensor) + assert isinstance(m.linear2.weight, LinearActQuantizedTensor) + assert isinstance(m.linear1.weight.original_weight_tensor, AffineQuantizedTensor) + assert isinstance(m.linear2.weight.original_weight_tensor, AffineQuantizedTensor) + + # reference + from torchao.quantization.quant_api import change_linear_weights_to_int8_dqtensors + change_linear_weights_to_int8_dqtensors(m_copy) + + res = m(*example_inputs) + ref = m_copy(*example_inputs) + + self.assertTrue(torch.equal(res, ref)) + + if __name__ == "__main__": unittest.main() diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index 607cb77766..bc40ffeaff 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -21,7 +21,9 @@ quantize_affine, dequantize_affine, ZeroPointDomain, + MappingType, ) +from torchao.kernel.intmm import int_scaled_matmul from .utils import find_multiple from typing import Tuple, Optional, Callable @@ -36,6 +38,30 @@ aten = torch.ops.aten +def _aqt_is_int8(aqt): + """Check if an AffineQuantizedTensor is int8 quantized Tensor""" + return ( + aqt.int_data.dtype == torch.int8 and + aqt.quant_min is None or aqt.quant_min == -128 and + aqt.quant_max is None or aqt.quant_max == 127 + ) + +def _aqt_is_int8_reduced_range(aqt): + return ( + aqt.int_data.dtype == torch.int8 and + aqt.quant_min == -127 and + aqt.quant_max is None or aqt.quant_max == 127 + ) + +def _aqt_is_uint4(aqt): + """Check if an AffineQuantizedTensor is uint4 quantized Tensor""" + # TODO: use torch.uint4 + return ( + aqt.int_data.dtype == torch.int32 and + aqt.quant_min is None or aqt.quant_min == 0 and + aqt.quant_max is None or aqt.quant_max == 15 + ) + class QuantizedLinearWeightBase(torch.Tensor): """ @@ -643,7 +669,6 @@ def __new__( quant_min: Optional[int] = None, quant_max: Optional[int] = None, zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, - 
input_quant_func: Optional[Callable] = None, dtype=None, # TODO: remove args and kwargs *args, @@ -670,7 +695,6 @@ def __init__( quant_min: Optional[int] = None, quant_max: Optional[int] = None, zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, - input_quant_func: Optional[Callable] = None, dtype=None, *args, **kwargs @@ -682,12 +706,11 @@ def __init__( self.quant_min = quant_min self.quant_max = quant_max self.zero_point_domain = zero_point_domain - self.input_quant_func = input_quant_func def __repr__(self): return ( f"{self.__class__.__name__}(data={self.dequantize()}, shape={self.shape}, " - f"device={self.device}, dtype={self.dtype}, input_quant_func={self.input_quant_func}, requires_grad={self.requires_grad})" + f"device={self.device}, dtype={self.dtype}, requires_grad={self.requires_grad})" ) def dequantize(self, output_dtype=None): @@ -696,14 +719,14 @@ def dequantize(self, output_dtype=None): return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, self.zero_point_domain, output_dtype=output_dtype) def __tensor_flatten__(self): - return ["int_data", "scales", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.input_quant_func, self.dtype] + return ["int_data", "scales", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.dtype] @classmethod def __tensor_unflatten__( cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride ): int_data, scale, zero_point = tensor_data_dict["int_data"], tensor_data_dict["scale"], tensor_data_dict["zero_point"] - block_size, shape, quant_min, quant_max, zero_point_domain, input_quant_func, dtype = tensor_attributes + block_size, shape, quant_min, quant_max, zero_point_domain, dtype = tensor_attributes return cls( int_data, scale, @@ -713,7 +736,6 @@ def __tensor_unflatten__( quant_min, quant_max, zero_point_domain, - input_quant_func=input_quant_func, dtype=dtype, strides=outer_stride, ) @@ -730,7 +752,6 @@ def from_float( eps = None, scale_dtype = None, zero_point_dtype = None, - input_quant_func = None, preserve_zero = True, zero_point_domain = ZeroPointDomain.INT, ): @@ -745,7 +766,6 @@ def from_float( quant_min, quant_max, zero_point_domain, - input_quant_func=input_quant_func, dtype=input_float.dtype ) @@ -759,27 +779,63 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): args[1], args[2] if len(args) > 2 else None, ) - if weight_qtensor.input_quant_func is None: - is_cuda = args[0].is_cuda - is_cpu = args[0].device == torch.device("cpu") - # weight only quantization - is_int8 = ( - weight_qtensor.int_data.dtype == torch.int8 and - weight_qtensor.quant_min is None or weight_qtensor.quant_min == -128 and - weight_qtensor.quant_max is None or weight_qtensor.quant_max == 127 - ) - is_uint4 = ( - weight_qtensor.int_data.dtype == torch.int32 and - weight_qtensor.quant_min == 0 and - weight_qtensor.quant_max == 15 - ) + is_cuda = weight_qtensor.is_cuda + is_cpu = weight_qtensor.device == torch.device("cpu") + if isinstance(weight_qtensor, AffineQuantizedTensor): + weight_is_int8 = _aqt_is_int8(weight_qtensor) + weight_is_uint4 = _aqt_is_uint4(weight_qtensor) + + if isinstance(input_tensor, AffineQuantizedTensor): + # if input tensor is quantized, either dispatch to the int8 mm kernel + # or just dequantize the input tensor + input_is_int8 = _aqt_is_int8_reduced_range(input_tensor) + input_tensor_dtype_is_expected = 
input_tensor.dtype in [ + torch.float, + torch.bfloat16 + ] + if ( + is_cuda and + input_is_int8 and + input_tensor_dtype_is_expected + ): + # + # 1. do the matrix form of dot(X_i, W_j) + # + # + # 2. rescale the output + # + # in cases with large matrices, y_dot_int32 can grow sufficiently + # large that y_dot_int32 * a float16 scale is greater than the maximum + # value of a float 16, (which results in a value of inf even if multiplying + # by the other scale would bring it within the expected range) + + x_vals_int8 = input_tensor.int_data + x_scales = input_tensor.scale + w_vals_int8_t = weight_qtensor.int_data.contiguous().t() + w_scales = weight_qtensor.scale + tmp = x_vals_int8.reshape(-1, x_vals_int8.shape[-1]) + y_dot_scaled = int_scaled_matmul(tmp, w_vals_int8_t, x_scales.reshape(-1, 1)) + + y = (y_dot_scaled * w_scales).reshape( + *x_vals_int8.shape[:-1], y_dot_scaled.shape[-1] + ) + + # can downcast only at the very end + output_dtype = input_tensor.dtype + y = y.to(output_dtype) + if bias is not None: + y += bias + return y + else: + input_tensor = input_tensor.dequantize() + # weight only quantization # TODO: enable cpu and mps path as well # TODO: make sure weight dimension matches the expectation of the int4mm kernel # TODO: move this to TinygemmAffineQuantizedTensor if ( is_cuda and - is_uint4 and + weight_is_uint4 and weight_qtensor.dtype == torch.bfloat16 and len(weight_qtensor.shape) == 2 and weight_qtensor.block_size[0] == 1 and @@ -796,7 +852,7 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): return torch.ops.aten._weight_int4pack_mm(input_tensor.contiguous(), packed_weight, groupsize, scales_and_zeros) elif ( is_cpu and - is_int8 and + weight_is_int8 and len(weight_qtensor.shape) == 2 and len(weight_qtensor.block_size) == 2 and weight_qtensor.block_size[0] == 1 and @@ -805,18 +861,16 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): # TODO: enable mps path as well # per channel int8 weight only quantizated mm return torch.ops.aten._weight_int8pack_mm(input_tensor.contiguous(), weight_qtensor.int_data, weight_qtensor.scale) + else: + weight_tensor = weight_qtensor.dequantize() + return torch.nn.functional.linear(input_tensor, weight_tensor, bias) else: - # dynamic quantization - input_tensor = weight_qtensor.input_quant_func(input_tensor) - input_tensor = input_tensor.dequantize() - weight_tensor = weight_qtensor.dequantize() - return torch.nn.functional.linear(input_tensor, weight_tensor, bias) + if isinstance(input_tensor, AffineQuantizedTensor): + input_tensor = input_tensor.dequantize() + return torch.nn.functional.linear(input_tensor, weight_tensor, bias) - try: - with torch._C.DisableTorchFunctionSubclass(): - return func(*args, **kwargs) - except: - print(f"ERR: subclass doesn't implement {func}") + with torch._C.DisableTorchFunctionSubclass(): + return func(*args, **kwargs) def _get_to_kwargs(self, *args, **kwargs): @@ -844,7 +898,6 @@ def to(self, *args, **kwargs): self.quant_min, self.quant_max, self.zero_point_domain, - self.input_quant_func, **kwargs, ) @@ -858,7 +911,6 @@ def _apply_fn_to_data(self, fn): self.quant_min, self.quant_max, self.zero_point_domain, - self.input_quant_func, dtype=self.dtype, ) @@ -900,16 +952,10 @@ def __torch_dispatch__(cls, func, types, args, kwargs): args[1], None if len(args) == 2 else args[2], ) - if weight_qtensor.input_quant_func is not None: - # dynamic quantization - input_tensor = weight_qtensor.input_quant_func(input_tensor) - input_tensor = input_tensor.dequantize() weight_tensor = 
weight_qtensor.dequantize() return func(input_tensor, weight_tensor, bias) - if (func is aten.detach.default or - func is aten.clone.default or - func is aten._to_copy.default): + if func is aten.detach.default: return return_and_correct_aliasing( func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) ) @@ -933,3 +979,126 @@ def __torch_dispatch__(cls, func, types, args, kwargs): kwargs, args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), ) + + raise NotImplementedError( + f"AffineQuantizedTensor dispatch: attempting to run {func}, this is not supported" + ) + + +class LinearActQuantizedTensor(torch.Tensor): + """ + Applies activation quantization for linear operator + """ + def __new__( + cls, + original_weight_tensor: torch.Tensor, + input_quant_func: Callable, + ): + kwargs = {} + dtype = original_weight_tensor.dtype + kwargs["dtype"] = dtype + kwargs["requires_grad"] = False + shape = original_weight_tensor.shape + return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] + + def __init__( + self, + original_weight_tensor: torch.Tensor, + input_quant_func: Callable, + ): + self.original_weight_tensor = original_weight_tensor + self.input_quant_func = input_quant_func + + def __tensor_flatten__(self): + return ["original_weight_tensor"], [self.input_quant_func] + + @classmethod + def __tensor_unflatten__( + cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride + ): + original_weight_tensor = tensor_data_dict["original_weight_tensor"] + input_quant_func = tensor_attributes + return cls( + original_weight_tensor, + input_quant_func, + ) + + @classmethod + def from_float( + cls, + input_float, + input_quant_func, + ): + return cls( + input_float, + input_quant_func, + ) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + kwargs = {} if kwargs is None else kwargs + + if func is torch.nn.functional.linear: + input_tensor, weight_tensor, bias = ( + args[0], + args[1], + args[2] if len(args) > 2 else None, + ) + if isinstance(weight_tensor, LinearActQuantizedTensor): + input_quant_func = weight_tensor.input_quant_func + original_weight_tensor = weight_tensor.original_weight_tensor + aqt = input_quant_func(input_tensor) + return torch.nn.functional.linear(aqt, original_weight_tensor, bias) + + with torch._C.DisableTorchFunctionSubclass(): + return func(*args, **kwargs) + + def _apply_fn_to_data(self, fn): + return self.__class__( + fn(self.original_weight_tensor), + self.input_quant_func, + ) + + def __torch_dispatch__(cls, func, types, args, kwargs): + if ( + func in [aten.mm.default, aten.addmm.default] + and args[0].is_floating_point() + ): + if func == aten.addmm.default: + assert args[1].shape[-1] == args[2].shape[0], ( + f"need mat1 shape: {args[1].shape} final" + f"dim to match mat2 shape: {args[2].shape} first dim " + ) + input_tensor, weight_qtensor, bias = ( + args[1], + args[2], + args[0], + ) + aqt = self.input_quant_func(input_tensor) + return func(bias, aqt, weight_tensor) + else: + assert args[0].shape[-1] == args[1].shape[0], ( + f"need mat1 shape: {args[0].shape} final dim" + f"to match mat2 shape: {args[1].shape} first dim" + ) + input_tensor, weight_qtensor, bias = ( + args[0], + args[1], + None if len(args) == 2 else args[2], + ) + aqt = self.input_quant_func(input_tensor) + return func(aqt, weight_tensor, bias) + + if func is aten.detach.default: + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) + ) + + if func is 
aten.clone.default: + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) + ) + + raise NotImplementedError( + f"LinearActQuantizedTensor dispatch: attempting to run {func}, this is not supported" + ) From 9dbdb2b466cab363a25322237860e935b7888bc4 Mon Sep 17 00:00:00 2001 From: "Jane (Yuan) Xu" <31798555+janeyx99@users.noreply.github.com> Date: Wed, 15 May 2024 21:58:00 -0400 Subject: [PATCH 37/61] Include installing requirements for building from source (#245) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 21a7195c27..150c67b512 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,8 @@ From source ```Shell git clone https://github.com/pytorch/ao cd ao +pip install -r requirements.txt +pip install -r dev-requirements.txt pip install . ``` From 9b25eccd29cb5d9b57aca8f817c290015d2abfd6 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Thu, 16 May 2024 12:50:39 -0700 Subject: [PATCH 38/61] Remove args and kwargs from `AffineQuantizedTensor` (#247) Summary: att Test Plan: python test/quantization/test_quant_api.py Reviewers: Subscribers: Tasks: Tags: --- torchao/quantization/subclass.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index bc40ffeaff..f3a1074ba5 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -670,10 +670,9 @@ def __new__( quant_max: Optional[int] = None, zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, dtype=None, - # TODO: remove args and kwargs - *args, - **kwargs + strides=None, ): + kwargs = {} kwargs["device"] = int_data.device kwargs["layout"] = ( kwargs.get("layout") if kwargs.get("layout", False) else int_data.layout @@ -681,7 +680,8 @@ def __new__( if dtype is None: dtype = scale.dtype kwargs["dtype"] = dtype - assert not kwargs.get("requires_grad", False) + if strides is not None: + kwargs["strides"] = strides kwargs["requires_grad"] = False return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] @@ -696,8 +696,7 @@ def __init__( quant_max: Optional[int] = None, zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, dtype=None, - *args, - **kwargs + strides=None, ): self.int_data = int_data self.scale = scale @@ -912,6 +911,7 @@ def _apply_fn_to_data(self, fn): self.quant_max, self.zero_point_domain, dtype=self.dtype, + strides=self.stride(), ) @classmethod From 5741aa254f6c3f43f88819639acc4bca1ae5b4e7 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Fri, 17 May 2024 12:28:35 -0700 Subject: [PATCH 39/61] Add to function and decorator for `AffineQuantizedTensor` (#251) Summary: att Next: we can move AffineQuantizedTensor to dtypes and make nf4tensor to use the same implements decorator Test Plan: python test/quantization/test_quant_api.py Reviewers: Subscribers: Tasks: Tags: --- torchao/quantization/subclass.py | 362 +++++++++++++++++++------------ 1 file changed, 218 insertions(+), 144 deletions(-) diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index f3a1074ba5..8d0af8b369 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -25,7 +25,9 @@ ) from torchao.kernel.intmm import int_scaled_matmul from .utils import find_multiple -from typing import Tuple, Optional, Callable +from typing import Tuple, Optional, Callable, Dict, Any +from collections import defaultdict +import functools __all__ = [ @@ -627,6 +629,63 @@ def 
to_qtensor_components(cls, input_float, groupsize=128, inner_k_tiles=8): int_data = aten._convert_weight_to_int4pack(input_int4x8, inner_k_tiles) return int_data, scales_and_zeros, False, groupsize, inner_k_tiles +def to_aqt( + input_float, + mapping_type, + block_size, + target_dtype, + quant_min = None, + quant_max = None, + eps = None, + scale_dtype = None, + zero_point_dtype = None, + preserve_zero = True, + zero_point_domain = ZeroPointDomain.INT, +): + return AffineQuantizedTensor.from_float( + input_float, + mapping_type, + block_size, + target_dtype, + quant_min=quant_min, + quant_max=quant_max, + eps=eps, + scale_dtype=scale_dtype, + zero_point_dtype=zero_point_dtype, + preserve_zero=preserve_zero, + zero_point_domain=zero_point_domain + ) + +# TODO: merge with nf4 implements decorator +# aten op to their __torch_dispatch__ implemnetations for the tensor subclass +_ATEN_OPS_TABLE: Dict[Callable, Dict[Any, Any]] = defaultdict(dict) + +def implements_aten_ops(cls, aten_ops): + """Use this decorator to implement a function for an aten op in __torch_dispatch__""" + + def decorator(func): + for op in aten_ops: + _ATEN_OPS_TABLE[cls][op] = func + return func + + return decorator + +_TORCH_FUNCTIONS_TABLE: Dict[Callable, Dict[Any, Any]] = defaultdict(dict) + +def implements_torch_function(cls, torch_function): + def decorator(func): + functools.update_wrapper(func, torch_function) + _TORCH_FUNCTIONS_TABLE[cls][torch_function] = func + return func + + return decorator + +def implements_aqt_aten_ops(aten_ops): + return implements_aten_ops(AffineQuantizedTensor, aten_ops) + +def implements_aqt_torch_function(torch_function): + return implements_torch_function(AffineQuantizedTensor, torch_function) + class AffineQuantizedTensor(torch.Tensor): """ @@ -772,101 +831,8 @@ def from_float( def __torch_function__(cls, func, types, args=(), kwargs=None): kwargs = {} if kwargs is None else kwargs - if func is torch.nn.functional.linear: - input_tensor, weight_qtensor, bias = ( - args[0], - args[1], - args[2] if len(args) > 2 else None, - ) - is_cuda = weight_qtensor.is_cuda - is_cpu = weight_qtensor.device == torch.device("cpu") - if isinstance(weight_qtensor, AffineQuantizedTensor): - weight_is_int8 = _aqt_is_int8(weight_qtensor) - weight_is_uint4 = _aqt_is_uint4(weight_qtensor) - - if isinstance(input_tensor, AffineQuantizedTensor): - # if input tensor is quantized, either dispatch to the int8 mm kernel - # or just dequantize the input tensor - input_is_int8 = _aqt_is_int8_reduced_range(input_tensor) - input_tensor_dtype_is_expected = input_tensor.dtype in [ - torch.float, - torch.bfloat16 - ] - if ( - is_cuda and - input_is_int8 and - input_tensor_dtype_is_expected - ): - # - # 1. do the matrix form of dot(X_i, W_j) - # - # - # 2. 
rescale the output - # - # in cases with large matrices, y_dot_int32 can grow sufficiently - # large that y_dot_int32 * a float16 scale is greater than the maximum - # value of a float 16, (which results in a value of inf even if multiplying - # by the other scale would bring it within the expected range) - - x_vals_int8 = input_tensor.int_data - x_scales = input_tensor.scale - w_vals_int8_t = weight_qtensor.int_data.contiguous().t() - w_scales = weight_qtensor.scale - tmp = x_vals_int8.reshape(-1, x_vals_int8.shape[-1]) - y_dot_scaled = int_scaled_matmul(tmp, w_vals_int8_t, x_scales.reshape(-1, 1)) - - y = (y_dot_scaled * w_scales).reshape( - *x_vals_int8.shape[:-1], y_dot_scaled.shape[-1] - ) - - # can downcast only at the very end - output_dtype = input_tensor.dtype - y = y.to(output_dtype) - if bias is not None: - y += bias - return y - else: - input_tensor = input_tensor.dequantize() - - # weight only quantization - # TODO: enable cpu and mps path as well - # TODO: make sure weight dimension matches the expectation of the int4mm kernel - # TODO: move this to TinygemmAffineQuantizedTensor - if ( - is_cuda and - weight_is_uint4 and - weight_qtensor.dtype == torch.bfloat16 and - len(weight_qtensor.shape) == 2 and - weight_qtensor.block_size[0] == 1 and - weight_qtensor.zero_point_domain == ZeroPointDomain.FLOAT - ): - # groupwise int4 quantization - # TODO: currently doing packing on the fly, we'll need to figure out - # the API to do packing before hand - # TODO: expose the arg - innerKTiles = 8 - packed_weight = torch.ops.aten._convert_weight_to_int4pack(weight_qtensor.int_data.to(torch.int32), innerKTiles) - scales_and_zeros = pack_tinygemm_scales_and_zeros(weight_qtensor.scale, weight_qtensor.zero_point) - groupsize = weight_qtensor.block_size[-1] - return torch.ops.aten._weight_int4pack_mm(input_tensor.contiguous(), packed_weight, groupsize, scales_and_zeros) - elif ( - is_cpu and - weight_is_int8 and - len(weight_qtensor.shape) == 2 and - len(weight_qtensor.block_size) == 2 and - weight_qtensor.block_size[0] == 1 and - weight_qtensor.block_size[1] == weight_qtensor.shape[1] - ): - # TODO: enable mps path as well - # per channel int8 weight only quantizated mm - return torch.ops.aten._weight_int8pack_mm(input_tensor.contiguous(), weight_qtensor.int_data, weight_qtensor.scale) - else: - weight_tensor = weight_qtensor.dequantize() - return torch.nn.functional.linear(input_tensor, weight_tensor, bias) - else: - if isinstance(input_tensor, AffineQuantizedTensor): - input_tensor = input_tensor.dequantize() - return torch.nn.functional.linear(input_tensor, weight_tensor, bias) + if func in _TORCH_FUNCTIONS_TABLE[cls]: + return _TORCH_FUNCTIONS_TABLE[cls][func](*args, **kwargs) with torch._C.DisableTorchFunctionSubclass(): return func(*args, **kwargs) @@ -927,62 +893,170 @@ def __torch_dispatch__(cls, func, types, args, kwargs): # 1 - when tensor is on CUDA: we'll add this later, we'll also enable dispatching to optimized # kernels in CPU as well, see the note above # 2 - we're given non-floats - quantizing long to int8 is crazy - if ( - func in [aten.mm.default, aten.addmm.default] - and args[0].is_floating_point() - and args[0].device == torch.device("cpu") - ): - if func == aten.addmm.default: - assert args[1].shape[-1] == args[2].shape[0], ( - f"need mat1 shape: {args[1].shape} final" - f"dim to match mat2 shape: {args[2].shape} first dim " - ) - input_tensor, weight_qtensor, bias = ( - args[1], - args[2], - args[0], + + if func in _ATEN_OPS_TABLE[cls]: + return 
_ATEN_OPS_TABLE[cls][func](func, *args, **kwargs) + + raise NotImplementedError( + f"AffineQuantizedTensor dispatch: attempting to run {func}, this is not supported" + ) + +@implements_aqt_torch_function(torch.nn.functional.linear) +def functional_linear(*args, **kwargs): + input_tensor, weight_qtensor, bias = ( + args[0], + args[1], + args[2] if len(args) > 2 else None, + ) + is_cuda = weight_qtensor.is_cuda + is_cpu = weight_qtensor.device == torch.device("cpu") + if isinstance(weight_qtensor, AffineQuantizedTensor): + weight_is_int8 = _aqt_is_int8(weight_qtensor) + weight_is_uint4 = _aqt_is_uint4(weight_qtensor) + + if isinstance(input_tensor, AffineQuantizedTensor): + # if input tensor is quantized, either dispatch to the int8 mm kernel + # or just dequantize the input tensor + input_is_int8 = _aqt_is_int8_reduced_range(input_tensor) + input_tensor_dtype_is_expected = input_tensor.dtype in [ + torch.float, + torch.bfloat16 + ] + if ( + is_cuda and + input_is_int8 and + input_tensor_dtype_is_expected + ): + # + # 1. do the matrix form of dot(X_i, W_j) + # + # + # 2. rescale the output + # + # in cases with large matrices, y_dot_int32 can grow sufficiently + # large that y_dot_int32 * a float16 scale is greater than the maximum + # value of a float 16, (which results in a value of inf even if multiplying + # by the other scale would bring it within the expected range) + + x_vals_int8 = input_tensor.int_data + x_scales = input_tensor.scale + w_vals_int8_t = weight_qtensor.int_data.contiguous().t() + w_scales = weight_qtensor.scale + tmp = x_vals_int8.reshape(-1, x_vals_int8.shape[-1]) + y_dot_scaled = int_scaled_matmul(tmp, w_vals_int8_t, x_scales.reshape(-1, 1)) + + y = (y_dot_scaled * w_scales).reshape( + *x_vals_int8.shape[:-1], y_dot_scaled.shape[-1] ) + + # can downcast only at the very end + output_dtype = input_tensor.dtype + y = y.to(output_dtype) + if bias is not None: + y += bias + return y else: - assert args[0].shape[-1] == args[1].shape[0], ( - f"need mat1 shape: {args[0].shape} final dim" - f"to match mat2 shape: {args[1].shape} first dim" - ) - input_tensor, weight_qtensor, bias = ( - args[0], - args[1], - None if len(args) == 2 else args[2], - ) + input_tensor = input_tensor.dequantize() + + # weight only quantization + # TODO: enable cpu and mps path as well + # TODO: make sure weight dimension matches the expectation of the int4mm kernel + # TODO: move this to TinygemmAffineQuantizedTensor + if ( + is_cuda and + weight_is_uint4 and + weight_qtensor.dtype == torch.bfloat16 and + len(weight_qtensor.shape) == 2 and + weight_qtensor.block_size[0] == 1 and + weight_qtensor.zero_point_domain == ZeroPointDomain.FLOAT + ): + # groupwise int4 quantization + # TODO: currently doing packing on the fly, we'll need to figure out + # the API to do packing before hand + # TODO: expose the arg + innerKTiles = 8 + packed_weight = torch.ops.aten._convert_weight_to_int4pack(weight_qtensor.int_data.to(torch.int32), innerKTiles) + scales_and_zeros = pack_tinygemm_scales_and_zeros(weight_qtensor.scale, weight_qtensor.zero_point) + groupsize = weight_qtensor.block_size[-1] + return torch.ops.aten._weight_int4pack_mm(input_tensor.contiguous(), packed_weight, groupsize, scales_and_zeros) + elif ( + is_cpu and + weight_is_int8 and + len(weight_qtensor.shape) == 2 and + len(weight_qtensor.block_size) == 2 and + weight_qtensor.block_size[0] == 1 and + weight_qtensor.block_size[1] == weight_qtensor.shape[1] + ): + # TODO: enable mps path as well + # per channel int8 weight only quantizated mm + 
return torch.ops.aten._weight_int8pack_mm(input_tensor.contiguous(), weight_qtensor.int_data, weight_qtensor.scale) + else: weight_tensor = weight_qtensor.dequantize() - return func(input_tensor, weight_tensor, bias) + return torch.nn.functional.linear(input_tensor, weight_tensor, bias) + else: + if isinstance(input_tensor, AffineQuantizedTensor): + input_tensor = input_tensor.dequantize() + return torch.nn.functional.linear(input_tensor, weight_tensor, bias) + + +@implements_aqt_aten_ops([aten.mm.default, aten.addmm.default]) +def aten_mm(func, *args, **kwargs): + if not args[0].is_floating_point(): + raise NotImplementedError(f"{func} is not implemented for non floating point input") + + if func == aten.addmm.default: + assert args[1].shape[-1] == args[2].shape[0], ( + f"need mat1 shape: {args[1].shape} final" + f"dim to match mat2 shape: {args[2].shape} first dim " + ) + input_tensor, weight_qtensor, bias = ( + args[1], + args[2], + args[0], + ) + else: + assert args[0].shape[-1] == args[1].shape[0], ( + f"need mat1 shape: {args[0].shape} final dim" + f"to match mat2 shape: {args[1].shape} first dim" + ) + input_tensor, weight_qtensor, bias = ( + args[0], + args[1], + None if len(args) == 2 else args[2], + ) + weight_tensor = weight_qtensor.dequantize() + return func(input_tensor, weight_tensor, bias) - if func is aten.detach.default: - return return_and_correct_aliasing( - func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) - ) +@implements_aqt_aten_ops([aten.detach.default]) +def detach(func, *args, **kwargs): + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) + ) - if func is aten.clone.default: - return return_and_correct_aliasing( - func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) - ) - if func is aten.t.default: - # TODO: need to implement this - # args[0].transposed = not args[0].transposed - # new = args[0]._change_shape(args[0].shape[::-1]) - # return return_and_correct_aliasing(func, args, kwargs, new) - raise Exception("transpose not implemented yet") +@implements_aqt_aten_ops([aten.clone.default]) +def clone(func, *args, **kwargs): + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) + ) - if func is aten._to_copy.default: - return return_and_correct_aliasing( - func, - args, - kwargs, - args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), - ) - raise NotImplementedError( - f"AffineQuantizedTensor dispatch: attempting to run {func}, this is not supported" - ) +@implements_aqt_aten_ops([aten._to_copy.default]) +def _to_copy(func, *args, **kwargs): + return return_and_correct_aliasing( + func, + args, + kwargs, + args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), + ) + +@implements_aqt_aten_ops([aten.t.default]) +def t(func, *args, **kwargs): + # TODO: need to implement this + # args[0].transposed = not args[0].transposed + # new = args[0]._change_shape(args[0].shape[::-1]) + # return return_and_correct_aliasing(func, args, kwargs, new) + raise Exception("transpose not implemented yet") class LinearActQuantizedTensor(torch.Tensor): From e9e5fae34de1071d39b03fbde97b7f0565b1d02b Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Fri, 17 May 2024 20:35:40 -0700 Subject: [PATCH 40/61] Fix quant_primitive dtype that caused perf regression (#253) Summary: API call in quantize_activation_per_token_absmax is not exactly preserving the original code, this PR fixes that Test Plan: need to check perf with torchbenchmarks Reviewers: Subscribers: Tasks: 
Tags: --- test/quantization/test_quant_primitives.py | 16 ++++++++++++++++ torchao/quantization/quant_primitives.py | 4 +++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/test/quantization/test_quant_primitives.py b/test/quantization/test_quant_primitives.py index 0fb48d761b..3f270e70ad 100644 --- a/test/quantization/test_quant_primitives.py +++ b/test/quantization/test_quant_primitives.py @@ -156,6 +156,22 @@ def test_quantize_activation_per_token_abs_max_zero_input(self): quantized_ref, scale_ref = quantize_activation_per_token_absmax(input) + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") + def test_quantize_activation_per_token_abs_max_dtype(self): + from torchao.quantization.quant_primitives import quantize_activation_per_token_absmax + input = torch.zeros(10, 10, dtype=torch.bfloat16) + quantized_ref, scale_ref = quantize_activation_per_token_absmax(input) + self.assertTrue(scale_ref.dtype, torch.bfloat16) + + input = torch.zeros(10, 10, dtype=torch.float32) + quantized_ref, scale_ref = quantize_activation_per_token_absmax(input) + self.assertTrue(scale_ref.dtype, torch.float32) + + input = torch.zeros(10, 10, dtype=torch.float16) + quantized_ref, scale_ref = quantize_activation_per_token_absmax(input) + self.assertTrue(scale_ref.dtype, torch.float32) + + @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_quantize_dequantize_group_sym(self): input = torch.randn(10, 10) diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index e1de871e24..bc2d44e576 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -416,7 +416,9 @@ def quantize_activation_per_token_absmax(t): # if we don't clamp. TODO(future) look into this further. quant_min = -127 quant_max = 127 - scale, zero_point = choose_qparams_affine(t, mapping_type, block_size, dtype, quant_min, quant_max, eps, scale_dtype=torch.float) + scale_dtype = torch.float32 if t.dtype == torch.float16 else None + + scale, zero_point = choose_qparams_affine(t, mapping_type, block_size, dtype, quant_min, quant_max, eps, scale_dtype=scale_dtype) quantized = quantize_affine(t, block_size, scale, zero_point, dtype, quant_min, quant_max) From e0affd6dc1658b1d0dc152fb4fc96f4895c31869 Mon Sep 17 00:00:00 2001 From: Huy Do Date: Mon, 20 May 2024 11:16:39 -0700 Subject: [PATCH 41/61] Add nightly build workflow (#250) * Add nightly build workflow * Fix wrong env script path * Install pip deps * Try to install gxx_linux-64 and gcc_linux-64 * Another attempt * Set TORCH_CUDA_ARCH_LIST * Does this work? 
* Attempt to upload to pypi * Pass the arch * Try pypa/gh-action-pypi-publish@release/v1 * Wrong workflow syntax * Try auditwheel * Repair wheel manylinux2014_x86_64 * Debug * Debug * Almost there * Debug * Almost there * Pass -y to uninstall * Final testing * Upload other python versions * Remove debug * Remove build.yml * Add more validations * Run all unit tests * Fix typo * Run only test_ops * Passing the secrets * Move pypi upload to test-infra * Enable CPU build * Switch back to main after landing https://github.com/pytorch/test-infra/pull/5217 * Remove unrelated copy/paste comments --- .github/workflows/build.yml | 71 ------------------------ .github/workflows/build_wheels_linux.yml | 45 +++++++++++++++ packaging/env_var_script_linux.sh | 19 +++++++ packaging/post_build_script.sh | 35 ++++++++++++ packaging/pre_build_script.sh | 14 +++++ packaging/smoke_test.py | 20 +++++++ 6 files changed, 133 insertions(+), 71 deletions(-) delete mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/build_wheels_linux.yml create mode 100644 packaging/env_var_script_linux.sh create mode 100644 packaging/post_build_script.sh create mode 100644 packaging/pre_build_script.sh create mode 100644 packaging/smoke_test.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 8678f4adbf..0000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: PyPI Nightly Build - -on: - schedule: - - cron: '0 0 * * *' # Runs at midnight UTC every day - workflow_dispatch: - inputs: - build-type: - description: 'Choose build type: nightly or release' - required: true - default: 'release' - options: - - nightly - - release - -jobs: - build-and-publish: - runs-on: linux.g5.12xlarge.nvidia.gpu - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.x' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine - pip install -r requirements.txt - - - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - with: - runner: ${{ matrix.runs-on }} - gpu-arch-type: "cuda" - gpu-arch-version: "12.1" - script: | - conda create -n venv python=3.9 -y - conda activate venv - echo "::group::Install newer objcopy that supports --set-section-alignment" - yum install -y devtoolset-10-binutils - export PATH=/opt/rh/devtoolset-10/root/usr/bin/:$PATH - python -m pip install --upgrade pip - pip install torch - pip install -r requirements.txt - pip install -r dev-requirements.txt - - - name: Build package - run: | - if [ "${{ github.event_name }}" = "schedule" ]; then - export TORCHAO_NIGHTLY=1 - elif [ "${{ github.event.inputs['build-type'] }}" = "nightly" ]; then - export TORCHAO_NIGHTLY=1 - fi - python setup.py sdist bdist_wheel - pytest test --verbose -s - - name: Publish package to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} - repository_url: https://upload.pypi.org/legacy/ - packages_dir: dist/ - - # - name: Open issue on failure - # if: ${{ failure() && github.event_name == 'schedule' }} - # uses: dacbd/create-issue-action@v1 - # with: - # token: ${{ secrets.GITHUB_TOKEN }} - # title: Nightly Build failed - # body: Commit ${{ github.sha }} daily scheduled [CI run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) failed, please check why - # assignees: '' diff --git a/.github/workflows/build_wheels_linux.yml 
b/.github/workflows/build_wheels_linux.yml new file mode 100644 index 0000000000..d81f2f8434 --- /dev/null +++ b/.github/workflows/build_wheels_linux.yml @@ -0,0 +1,45 @@ +# From https://github.com/pytorch/test-infra/wiki/Using-Nova-Reusable-Build-Workflows +name: Build Linux Wheels + +on: + pull_request: + paths: + - build/packaging/** + - .github/workflows/build_wheels_linux.yml + schedule: + - cron: '0 0 * * *' # Runs at midnight UTC every day + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: wheel + os: linux + with-cpu: enable + with-cuda: enable + with-rocm: disable + + build: + needs: generate-matrix + permissions: + id-token: write + contents: read + uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main + with: + # Set the ref to an empty string instead of the default nightly because + # torchao doesn't have nightly branch setup yet, instead the build is + # triggered daily from main with a schedule + repository: pytorch/ao + ref: "" + build-matrix: ${{ needs.generate-matrix.outputs.matrix }} + env-var-script: packaging/env_var_script_linux.sh + pre-script: packaging/pre_build_script.sh + post-script: packaging/post_build_script.sh + smoke-test-script: packaging/smoke_test.py + package-name: torchao + trigger-event: ${{ github.event_name }} + # This is the CUDA version to be uploaded to torchao-nightly pypi + upload-to-pypi: cu121 + secrets: + PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} diff --git a/packaging/env_var_script_linux.sh b/packaging/env_var_script_linux.sh new file mode 100644 index 0000000000..3d3394fbd5 --- /dev/null +++ b/packaging/env_var_script_linux.sh @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# This file is sourced into the environment before building a pip wheel. It +# should typically only contain shell variable assignments. Be sure to export +# any variables so that subprocesses will see them. +if [[ ${CHANNEL:-nightly} == "nightly" ]]; then + export TORCHAO_NIGHTLY=1 +fi + +# Set ARCH list so that we can build fp16 with SM75+, the logic is copied from +# pytorch/builder +TORCH_CUDA_ARCH_LIST="8.0;8.6" +if [[ ${CU_VERSION:-} == "cu124" ]]; then + TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0" +fi diff --git a/packaging/post_build_script.sh b/packaging/post_build_script.sh new file mode 100644 index 0000000000..e036fcbcfe --- /dev/null +++ b/packaging/post_build_script.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +set -eux + +WHEEL_NAME=$(ls dist/) + +pushd dist +# Prepare manywheel +auditwheel repair --plat manylinux2014_x86_64 -w . \ + --exclude libtorch.so \ + --exclude libtorch_python.so \ + --exclude libtorch_cuda.so \ + --exclude libtorch_cpu.so \ + --exclude libc10.so \ + --exclude libc10_cuda.so \ + --exclude libcudart.so.12 \ + --exclude libcudart.so.11.0 \ + "${WHEEL_NAME}" + +ls -lah . +# Clean up the linux_x86_64 wheel +rm "${WHEEL_NAME}" +popd + +MANYWHEEL_NAME=$(ls dist/) +# Try to install the new wheel +pip install "dist/${MANYWHEEL_NAME}" +# and validating it by running the unit tests. 
Some tests are failing here and +# there, so let's add more of them later +pytest -v test/test_ops.py diff --git a/packaging/pre_build_script.sh b/packaging/pre_build_script.sh new file mode 100644 index 0000000000..366752f7ed --- /dev/null +++ b/packaging/pre_build_script.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +set -eux + +echo "This script is run before building torchao binaries" + +pip install setuptools wheel twine auditwheel +pip install -r requirements.txt +pip install -r dev-requirements.txt diff --git a/packaging/smoke_test.py b/packaging/smoke_test.py new file mode 100644 index 0000000000..2c2c873512 --- /dev/null +++ b/packaging/smoke_test.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import subprocess +import torchao.ops + + +def main(): + """ + Run torchao binary smoke tests like importing and performing simple ops + """ + print(dir(torchao.ops)) + + +if __name__ == "__main__": + main() From adfe57049028f74774c37b7e1c57c91537fd6ffb Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Mon, 20 May 2024 12:11:14 -0700 Subject: [PATCH 42/61] Disable a flaky test (#257) Summary: att Test Plan: python test/integration/test_integration.py Reviewers: Subscribers: Tasks: Tags: Co-authored-by: Mark Saroufim --- test/integration/test_integration.py | 1 + test/quantization/test_quant_api.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index d701177016..3c5414ceac 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -1104,6 +1104,7 @@ def test_weight_only_quant(self): @parameterized.expand(COMMON_DEVICE_DTYPE) @torch.no_grad() @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") + @unittest.skip("This test is flaky, we'll enable later") def test_weight_only_quant_force_mixed_mm(self, device, dtype): if device != "cuda": self.skipTest(f"weight_only_quant_force_mixed_mm can't be constructed on {device}") diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index fcab07c913..8cceefb0a8 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -563,7 +563,7 @@ def get_per_token_block_size(x): input_eps = 1e-5 input_quant_min = -127 input_quant_max = 127 - input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float) + input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None) # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") From f0f00cef02516534db3cafb7506da4d0f61ef10e Mon Sep 17 00:00:00 2001 From: lancerts Date: Mon, 20 May 2024 13:12:29 -0700 Subject: [PATCH 43/61] In tutorials/quantize_vit, extract common methods to 
util.py (#238) * Extract common methods to util.py * Update tutorials/quantize_vit/run_vit_b.py Co-authored-by: Mark Saroufim * Update tutorials/quantize_vit/run_vit_b_quant.py Co-authored-by: Mark Saroufim * amend * amend * Include the torchao utils --------- Co-authored-by: Mark Saroufim Co-authored-by: Mark Saroufim --- torchao/utils.py | 26 +++++++++++++++++++++++ tutorials/quantize_vit/run_vit_b.py | 26 ++--------------------- tutorials/quantize_vit/run_vit_b_quant.py | 26 ++--------------------- 3 files changed, 30 insertions(+), 48 deletions(-) create mode 100644 torchao/utils.py diff --git a/torchao/utils.py b/torchao/utils.py new file mode 100644 index 0000000000..c414843da1 --- /dev/null +++ b/torchao/utils.py @@ -0,0 +1,26 @@ +import torch + + +def benchmark_model(model, num_runs, input_tensor): + torch.cuda.synchronize() + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + start_event.record() + + # benchmark + for _ in range(num_runs): + with torch.autograd.profiler.record_function("timed region"): + model(input_tensor) + + end_event.record() + torch.cuda.synchronize() + return start_event.elapsed_time(end_event) / num_runs + +def profiler_runner(path, fn, *args, **kwargs): + with torch.profiler.profile( + activities=[torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA], + record_shapes=True) as prof: + result = fn(*args, **kwargs) + prof.export_chrome_trace(path) + return result diff --git a/tutorials/quantize_vit/run_vit_b.py b/tutorials/quantize_vit/run_vit_b.py index ab19f7ba28..a7fd78f9b2 100644 --- a/tutorials/quantize_vit/run_vit_b.py +++ b/tutorials/quantize_vit/run_vit_b.py @@ -1,6 +1,8 @@ import torch import torchvision.models.vision_transformer as models +from torchao.utils import benchmark_model, profiler_runner +torch.set_float32_matmul_precision("high") # Load Vision Transformer model model = models.vit_b_16(pretrained=True) @@ -12,30 +14,6 @@ model = torch.compile(model, mode='max-autotune') -def benchmark_model(model, num_runs, input_tensor): - torch.cuda.synchronize() - start_event = torch.cuda.Event(enable_timing=True) - end_event = torch.cuda.Event(enable_timing=True) - start_event.record() - - # benchmark - for _ in range(num_runs): - with torch.autograd.profiler.record_function("timed region"): - model(input_tensor) - - end_event.record() - torch.cuda.synchronize() - return start_event.elapsed_time(end_event) / num_runs - -def profiler_runner(path, fn, *args, **kwargs): - with torch.profiler.profile( - activities=[torch.profiler.ProfilerActivity.CPU, - torch.profiler.ProfilerActivity.CUDA], - record_shapes=True) as prof: - result = fn(*args, **kwargs) - prof.export_chrome_trace(path) - return result - # Must run with no_grad when optimizing for inference with torch.no_grad(): # warmup diff --git a/tutorials/quantize_vit/run_vit_b_quant.py b/tutorials/quantize_vit/run_vit_b_quant.py index c329c28d0c..0396a9dffd 100644 --- a/tutorials/quantize_vit/run_vit_b_quant.py +++ b/tutorials/quantize_vit/run_vit_b_quant.py @@ -2,6 +2,8 @@ import torchao import torchvision.models.vision_transformer as models +from torchao.utils import benchmark_model, profiler_runner +torch.set_float32_matmul_precision("high") # Load Vision Transformer model model = models.vit_b_16(pretrained=True) @@ -19,30 +21,6 @@ model = torch.compile(model, mode='max-autotune') -def benchmark_model(model, num_runs, input_tensor): - torch.cuda.synchronize() - start_event = torch.cuda.Event(enable_timing=True) - end_event = 
torch.cuda.Event(enable_timing=True) - start_event.record() - - # benchmark - for _ in range(num_runs): - with torch.autograd.profiler.record_function("timed region"): - model(input_tensor) - - end_event.record() - torch.cuda.synchronize() - return start_event.elapsed_time(end_event) / num_runs - -def profiler_runner(path, fn, *args, **kwargs): - with torch.profiler.profile( - activities=[torch.profiler.ProfilerActivity.CPU, - torch.profiler.ProfilerActivity.CUDA], - record_shapes=True) as prof: - result = fn(*args, **kwargs) - prof.export_chrome_trace(path) - return result - # Must run with no_grad when optimizing for inference with torch.no_grad(): # warmup From 3c9bc20d7d5f7d33ee9790d03a2f7dc558693645 Mon Sep 17 00:00:00 2001 From: Eli Uriegas <1700823+seemethere@users.noreply.github.com> Date: Mon, 20 May 2024 15:08:43 -0700 Subject: [PATCH 44/61] ci: Add push trigger for binary build workflows (#259) --- .github/workflows/build_wheels_linux.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/build_wheels_linux.yml b/.github/workflows/build_wheels_linux.yml index d81f2f8434..c20b481828 100644 --- a/.github/workflows/build_wheels_linux.yml +++ b/.github/workflows/build_wheels_linux.yml @@ -6,6 +6,15 @@ on: paths: - build/packaging/** - .github/workflows/build_wheels_linux.yml + push: + branches: + - nightly + - main + - release/* + tags: + # NOTE: Binary build pipelines should only get triggered on release candidate builds + # Release candidate tags look like: v1.11.0-rc1 + - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ schedule: - cron: '0 0 * * *' # Runs at midnight UTC every day workflow_dispatch: From 5e2810924e286af4c55739d710aa3b0de4a8161f Mon Sep 17 00:00:00 2001 From: Yi Liu <106061964+yiliu30@users.noreply.github.com> Date: Wed, 22 May 2024 04:59:42 +0800 Subject: [PATCH 45/61] Fixed the HQQ import skip (#262) --- test/hqq/test_triton_mm.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/test/hqq/test_triton_mm.py b/test/hqq/test_triton_mm.py index 628723ea1c..4684f28221 100644 --- a/test/hqq/test_triton_mm.py +++ b/test/hqq/test_triton_mm.py @@ -5,12 +5,9 @@ "triton", minversion="3.0.0", reason="Triton > 3.0.0 required to run this test" ) hqq = pytest.importorskip("hqq", reason="hqq required to run this test") -HQQLinear = pytest.importorskip( - "hqq.core.quantize.HQQLinear", reason="HQQLinear required to run this test" -) -BaseQuantizeConfig = pytest.importorskip( - "hqq.core.quantize.BaseQuantizeConfig", reason="HQQLinear required to run this test" -) +hqq_quantize = pytest.importorskip("hqq.core.quantize", reason="hqq required to run this test") +HQQLinear = hqq_quantize.HQQLinear +BaseQuantizeConfig = hqq_quantize.BaseQuantizeConfig import itertools From cb8b6515941ee4e2f20dc3d75129aa5d6ed8b2c0 Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Thu, 23 May 2024 13:56:05 -0700 Subject: [PATCH 46/61] FP8 splitgemm user defined triton kernel (#263) * FP8 splitgemm user defined triton kernel * yolo * Trigger CI * yolo * yolo * yolo * yolo * Update test_fp8.py --- test/dtypes/test_fp8.py | 45 ++++++++++ torchao/prototype/fp8/__init__.py | 1 + torchao/prototype/fp8/splitk_gemm.py | 119 +++++++++++++++++++++++++++ torchao/quantization/utils.py | 1 + 4 files changed, 166 insertions(+) create mode 100644 test/dtypes/test_fp8.py create mode 100644 torchao/prototype/fp8/__init__.py create mode 100644 torchao/prototype/fp8/splitk_gemm.py diff --git a/test/dtypes/test_fp8.py b/test/dtypes/test_fp8.py new file mode 100644 index 
0000000000..3218126d41 --- /dev/null +++ b/test/dtypes/test_fp8.py @@ -0,0 +1,45 @@ +import os +import unittest +import torch +from torch.testing._internal.common_utils import ( + TestCase, + instantiate_parametrized_tests, + parametrize, + run_tests, +) +from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 +try: + from torchao.prototype.fp8 import gemm_split_k + triton_available = True +except ImportError: + triton_available = False + +@unittest.skipIf(not triton_available, "Triton is required but not available") +@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required") +class TestFP8Gemm(TestCase): + # @parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) + def test_gemm_split_k(self): + m, n, k = 256, 256, 512 + + a = torch.randn((m, k), dtype=torch.float16, device="cuda") + b = torch.randn((k, n), dtype=torch.float16, device="cuda") + c = gemm_split_k(a, b) + c_expected = torch.matmul(a, b) + assert torch.allclose(c, c_expected, atol=0.07) # less than this and the accuracy check fails + + # https://pytorch.org/tutorials/recipes/torch_compile_user_defined_triton_kernel_tutorial.html + @unittest.skip("fp8 kernel compilation does not work on a10g") + def test_user_defined_triton_function(self): + import torch._inductor.config + torch._inductor.config.force_disable_caches = True + os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' + m, n, k = 256, 256, 512 + + a = torch.randn((m, k), dtype=torch.float16, device="cuda") + b = torch.randn((k, n), dtype=torch.float16, device="cuda") + compiled_function = torch.compile(gemm_split_k, fullgraph=True)(a,b) + + + +if __name__ == "__main__": + run_tests() diff --git a/torchao/prototype/fp8/__init__.py b/torchao/prototype/fp8/__init__.py new file mode 100644 index 0000000000..34b513239b --- /dev/null +++ b/torchao/prototype/fp8/__init__.py @@ -0,0 +1 @@ +from .splitk_gemm import gemm_split_k diff --git a/torchao/prototype/fp8/splitk_gemm.py b/torchao/prototype/fp8/splitk_gemm.py new file mode 100644 index 0000000000..0bd37dffc3 --- /dev/null +++ b/torchao/prototype/fp8/splitk_gemm.py @@ -0,0 +1,119 @@ +# Code from https://github.com/pytorch-labs/applied-ai/blob/main/kernels/triton/inference/fp8/splitk_gemm_fp8.py +import torch +import triton +import triton.language as tl + +@triton.jit +def grouped_launch(pid, + m, n, + block_m: tl.constexpr, block_n: tl.constexpr, group_m: tl.constexpr): + + grid_m = tl.cdiv(m, block_m) + grid_n = tl.cdiv(n, block_n) + + width = group_m * grid_n + group_id = pid // width + group_size = tl.minimum(grid_m - group_id * group_m, group_m) + + pid_m = group_id * group_m + (pid % group_size) + pid_n = (pid % width) // group_size + + return pid_m, pid_n + + +@triton.jit() +def col_major(pid, + m, n, + block_m: tl.constexpr, block_n: tl.constexpr): + + grid_m = tl.cdiv(m, block_m) + + pid_m = pid % grid_m + pid_n = pid // grid_m + + return pid_m, pid_n + + +@triton.jit +def gemm_split_k_kernel(a_ptr, b_ptr, c_ptr, + stride_am, stride_ak, + stride_bk, stride_bn, + stride_cm, stride_cn, + m, n, k, + block_m: tl.constexpr, block_n: tl.constexpr, block_k: tl.constexpr, + split_k: tl.constexpr, group_m: tl.constexpr): + + pid = tl.program_id(0) + pid_k = tl.program_id(1) + grid_k = tl.cdiv(k, block_k*split_k) + + pid_m, pid_n = grouped_launch(pid, + m, n, + block_m, block_n, group_m) + + offs_m = pid_m*block_m + tl.arange(0, block_m) + offs_n = pid_n*block_n + tl.arange(0, block_n) + offs_k = pid_k*block_k + tl.arange(0, block_k) + + offs_am = tl.max_contiguous(tl.multiple_of(offs_m, block_m), 
block_m)
+ offs_bn = tl.max_contiguous(tl.multiple_of(offs_n, block_n), block_n)
+
+ a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
+ b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
+
+ acc = tl.zeros((block_m, block_n), dtype=tl.float32)
+ for k_ in range(0, grid_k):
+
+ k_remaining = k - k_ * (block_k * split_k)
+
+ a = tl.load(a_ptrs, mask=offs_k[None, :] < k_remaining, other=0.0)
+ b = tl.load(b_ptrs, mask=offs_k[:, None] < k_remaining, other=0.0)
+
+ acc = tl.dot(a, b, acc, out_dtype=tl.float32)
+
+ a_ptrs += block_k * split_k * stride_ak
+ b_ptrs += block_k * split_k * stride_bk
+
+ acc = acc.to(tl.float16)
+
+ offs_m = pid_m*block_m + tl.arange(0, block_m)
+ offs_n = pid_n*block_n + tl.arange(0, block_n)
+
+ c_ptrs = c_ptr + (offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn)
+ mask = (offs_m < m)[:, None] & (offs_n < n)[None, :]
+
+ tl.atomic_add(c_ptrs, acc, mask=mask)
+
+def gemm_split_k(a, b):
+
+ m, k = a.shape
+ _, n = b.shape
+
+ # Need to change these, otherwise we were getting
+ # triton.runtime.errors.OutOfResources: out of resource: shared memory, Required: 393216, Hardware limit: 232448. Reducing block sizes or `num_stages` may help.
+ # TODO: Should we tune this differently for different hardware?
+ block_m = 32
+ block_n = 32
+ block_k = 256
+ num_stages = 2
+ num_warps = 4
+ split_k = 4
+ group_m = 8
+
+ total_blocks_m = triton.cdiv(m, block_m)
+ total_blocks_n = triton.cdiv(n, block_n)
+ total_programs_mn = total_blocks_m * total_blocks_n
+ total_programs_k = split_k
+
+ grid = (total_programs_mn, total_programs_k)
+
+ c = torch.zeros((m, n), device=a.device, dtype=torch.float16)
+ gemm_split_k_kernel[grid](a, b, c,
+ a.stride(0), a.stride(1),
+ b.stride(0), b.stride(1),
+ c.stride(0), c.stride(1),
+ m, n, k,
+ block_m, block_n, block_k,
+ split_k, group_m, num_stages=num_stages, num_warps=num_warps)
+
+ return c
diff --git a/torchao/quantization/utils.py b/torchao/quantization/utils.py
index a178edf125..6ee059f288 100644
--- a/torchao/quantization/utils.py
+++ b/torchao/quantization/utils.py
@@ -99,6 +99,7 @@ def get_model_size_in_bytes(model):
 s += b.nelement() * b.element_size()
 return s
+# TODO: quantization namespace is not the right place to have this
 if version.parse(torch.__version__) >= version.parse("2.4.0.dev"):
 TORCH_VERSION_AFTER_2_4 = True
 else:
From 4a4e86bb799866e746b760b357b5e7b91e1683fd Mon Sep 17 00:00:00 2001
From: Mark Saroufim
Date: Thu, 23 May 2024 18:19:35 -0700
Subject: [PATCH 47/61] Make fp8 test explicit (#266)

* Make fp8 test explicit
* yolo
* start test utils
* yolo
---
 test/dtypes/test_fp8.py | 45 +++++++++++++++++--------
 torchao/prototype/fp8/__init__.py | 2 +-
 torchao/prototype/fp8/splitk_gemm.py | 49 +++++++++++++---------------
 torchao/utils.py | 20 ++++++++++--
 4 files changed, 72 insertions(+), 44 deletions(-)

diff --git a/test/dtypes/test_fp8.py b/test/dtypes/test_fp8.py
index 3218126d41..811de3a4c3 100644
--- a/test/dtypes/test_fp8.py
+++ b/test/dtypes/test_fp8.py
@@ -8,38 +8,55 @@
 run_tests,
 )
 from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4
+
 try:
- from torchao.prototype.fp8 import gemm_split_k
+ from torchao.prototype.fp8 import gemm_split_k, to_float8
 triton_available = True
 except ImportError:
 triton_available = False
+from torchao.utils import skip_if_compute_capability_less_than
+
 @unittest.skipIf(not triton_available, "Triton is required but not available")
 @unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
 class
TestFP8Gemm(TestCase): - # @parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) + @skip_if_compute_capability_less_than(9.0) def test_gemm_split_k(self): - m, n, k = 256, 256, 512 + dtype = torch.float16 + qdtype = torch.float8_e4m3fn - a = torch.randn((m, k), dtype=torch.float16, device="cuda") - b = torch.randn((k, n), dtype=torch.float16, device="cuda") - c = gemm_split_k(a, b) - c_expected = torch.matmul(a, b) - assert torch.allclose(c, c_expected, atol=0.07) # less than this and the accuracy check fails + torch.cuda.manual_seed(0) + + m = 64 + n = 4096 + k = 4096 + + # create test inputs + x = torch.randn((m, k), dtype=dtype, device='cuda') + w = torch.randn((n, k), dtype=dtype, device='cuda') + + x_fp8, x_inv_s = to_float8(x, dtype=qdtype) + w_fp8, w_inv_s = to_float8(w, dtype=qdtype) + + y_torch, _ = torch._scaled_mm(x_fp8, w_fp8.t(), out_dtype=dtype, scale_a=x_inv_s, scale_b=w_inv_s) + y_triton = gemm_split_k(x_fp8, w_fp8.t(), scale_a=x_inv_s.item(), scale_b=w_inv_s.item()) + y_fp16 = torch.nn.functional.linear(x, w) + + cos_sim_torch = torch.nn.functional.cosine_similarity(y_fp16.reshape(-1), y_torch.reshape(-1), dim=0) + cos_sim_triton = torch.nn.functional.cosine_similarity(y_fp16.reshape(-1), y_triton.reshape(-1), dim=0) + + assert cos_sim_torch > 0.99, f"fp16 vs torch cos_sim is too low: {cos_sim_torch}" + assert cos_sim_triton > 0.99, f"fp16 vs triton cos_sim is too low: {cos_sim_triton}" # https://pytorch.org/tutorials/recipes/torch_compile_user_defined_triton_kernel_tutorial.html - @unittest.skip("fp8 kernel compilation does not work on a10g") + @skip_if_compute_capability_less_than(9.0) + @unittest.skip("On H100: OutOfResources: out of resource: shared memory, Required: 393216, Hardware limit: 232448. Reducing block sizes or `num_stages` may help.") def test_user_defined_triton_function(self): - import torch._inductor.config - torch._inductor.config.force_disable_caches = True - os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' m, n, k = 256, 256, 512 a = torch.randn((m, k), dtype=torch.float16, device="cuda") b = torch.randn((k, n), dtype=torch.float16, device="cuda") compiled_function = torch.compile(gemm_split_k, fullgraph=True)(a,b) - - if __name__ == "__main__": run_tests() diff --git a/torchao/prototype/fp8/__init__.py b/torchao/prototype/fp8/__init__.py index 34b513239b..44983dbf43 100644 --- a/torchao/prototype/fp8/__init__.py +++ b/torchao/prototype/fp8/__init__.py @@ -1 +1 @@ -from .splitk_gemm import gemm_split_k +from .splitk_gemm import gemm_split_k, to_float8 diff --git a/torchao/prototype/fp8/splitk_gemm.py b/torchao/prototype/fp8/splitk_gemm.py index 0bd37dffc3..1efaa731db 100644 --- a/torchao/prototype/fp8/splitk_gemm.py +++ b/torchao/prototype/fp8/splitk_gemm.py @@ -1,7 +1,8 @@ -# Code from https://github.com/pytorch-labs/applied-ai/blob/main/kernels/triton/inference/fp8/splitk_gemm_fp8.py import torch import triton import triton.language as tl +import os +os.environ['ENABLE_TMA'] = '1' @triton.jit def grouped_launch(pid, @@ -20,25 +21,12 @@ def grouped_launch(pid, return pid_m, pid_n - -@triton.jit() -def col_major(pid, - m, n, - block_m: tl.constexpr, block_n: tl.constexpr): - - grid_m = tl.cdiv(m, block_m) - - pid_m = pid % grid_m - pid_n = pid // grid_m - - return pid_m, pid_n - - @triton.jit def gemm_split_k_kernel(a_ptr, b_ptr, c_ptr, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, + scale_a, scale_b, m, n, k, block_m: tl.constexpr, block_n: tl.constexpr, block_k: tl.constexpr, split_k: tl.constexpr, group_m: 
tl.constexpr): @@ -61,6 +49,7 @@ def gemm_split_k_kernel(a_ptr, b_ptr, c_ptr, a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) + acc = tl.zeros((block_m, block_n), dtype=tl.float32) for k_ in range(0, grid_k): @@ -74,7 +63,8 @@ def gemm_split_k_kernel(a_ptr, b_ptr, c_ptr, a_ptrs += block_k * split_k * stride_ak b_ptrs += block_k * split_k * stride_bk - acc = acc.to(tl.float16) + acc = scale_a * scale_b * acc + acc.to(tl.float16) offs_m = pid_m*block_m + tl.arange(0, block_m) offs_n = pid_n*block_n + tl.arange(0, block_n) @@ -84,19 +74,16 @@ def gemm_split_k_kernel(a_ptr, b_ptr, c_ptr, tl.atomic_add(c_ptrs, acc, mask=mask) -def gemm_split_k(a, b): - +def gemm_split_k(a, b, scale_a:float=1.0, scale_b:float=1.0): + assert a.shape[1] == b.shape[0] m, k = a.shape _, n = b.shape - # Need to change these otherwise was getting - # triton.runtime.errors.OutOfResources: out of resource: shared memory, Required: 393216, Hardware limit: 232448. Reducing block sizes or `num_stages` may help. - # TODO: Should we tune this differently for different hardware? - block_m = 32 - block_n = 32 - block_k = 256 - num_stages = 2 - num_warps = 4 + block_m = 64 + block_n = 64 + block_k = 512 + num_stages = 3 + num_warps = 8 split_k = 4 group_m = 8 @@ -108,12 +95,20 @@ def gemm_split_k(a, b): grid = (total_programs_mn, total_programs_k) c = torch.zeros((m, n), device=a.device, dtype=torch.float16) - gemm_split_k_kernel[grid](a, b, c, + k = gemm_split_k_kernel[grid](a, b, c, a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1), + scale_a, scale_b, m, n, k, block_m, block_n, block_k, split_k, group_m, num_stages=num_stages, num_warps=num_warps) return c + + +def to_float8(x, dtype=torch.float8_e4m3fn): + finfo = torch.finfo(dtype) + scale = finfo.max / x.abs().max().clamp(min=1e-12) + x_scl_sat = (x * scale).clamp(min=finfo.min, max=finfo.max) + return x_scl_sat.to(dtype), scale.float().reciprocal() diff --git a/torchao/utils.py b/torchao/utils.py index c414843da1..fcf853da3e 100644 --- a/torchao/utils.py +++ b/torchao/utils.py @@ -6,12 +6,12 @@ def benchmark_model(model, num_runs, input_tensor): start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) start_event.record() - + # benchmark for _ in range(num_runs): with torch.autograd.profiler.record_function("timed region"): model(input_tensor) - + end_event.record() torch.cuda.synchronize() return start_event.elapsed_time(end_event) / num_runs @@ -24,3 +24,19 @@ def profiler_runner(path, fn, *args, **kwargs): result = fn(*args, **kwargs) prof.export_chrome_trace(path) return result + +def get_compute_capability(): + if torch.cuda.is_available(): + capability = torch.cuda.get_device_capability() + return float(f"{capability[0]}.{capability[1]}") + return 0.0 + +def skip_if_compute_capability_less_than(min_capability): + import unittest + def decorator(test_func): + def wrapper(*args, **kwargs): + if get_compute_capability() < min_capability: + raise unittest.SkipTest(f"Compute capability is less than {min_capability}") + return test_func(*args, **kwargs) + return wrapper + return decorator From 3ed16050bdbbe842b08a875266ebee72669f42b4 Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Thu, 23 May 2024 18:50:57 -0700 Subject: [PATCH 48/61] Remove test_ops.py warning spew (#267) --- test/test_ops.py | 2 ++ torchao/ops.py | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff 
--git a/test/test_ops.py b/test/test_ops.py index e260e86f0f..d73ae536ac 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -5,10 +5,12 @@ from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 import unittest from parameterized import parameterized +import pytest # torch.testing._internal.optests.generate_tests.OpCheckError: opcheck(op, ...): # test_faketensor failed with module 'torch' has no attribute '_custom_ops' (scroll up for stack trace) +@pytest.mark.filterwarnings("ignore:create_unbacked_symint is deprecated, please use new_dynamic_size instead:UserWarning") @unittest.skipIf(IS_FBCODE, "Skipping the test in fbcode since we don't have TARGET file for kernels") class TestOps(TestCase): def _create_tensors_with_iou(self, N, iou_thresh): diff --git a/torchao/ops.py b/torchao/ops.py index 3a25dbf6db..fcc6ae9364 100644 --- a/torchao/ops.py +++ b/torchao/ops.py @@ -1,5 +1,14 @@ import torch from torch import Tensor +from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 + +def register_custom_op(name): + def decorator(func): + if TORCH_VERSION_AFTER_2_4: + return torch.library.register_fake(f"{name}")(func) + else: + return torch.library.impl_abstract(f"{name}")(func) + return decorator def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: """ @@ -9,7 +18,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: # Defines the meta kernel / fake kernel / abstract impl -@torch.library.impl_abstract("torchao::nms") +@register_custom_op("torchao::nms") def _(dets, scores, iou_threshold): torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D") torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got {dets.size(1)}") @@ -36,7 +45,7 @@ def prepack_fp6_weight(fp6_weight: Tensor) -> Tensor: return torch.ops.torchao.prepack_fp6_weight.default(fp6_weight) -@torch.library.impl_abstract("torchao::prepack_fp6_weight") +@register_custom_op("torchao::prepack_fp6_weight") def _(fp6_weight): torch._check(fp6_weight.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp6_weight.dim()}D") return torch.empty_like(fp6_weight) @@ -49,7 +58,7 @@ def fp16_to_fp6(fp16_tensor: Tensor) -> Tensor: return torch.ops.torchao.fp16_to_fp6.default(fp16_tensor) -@torch.library.impl_abstract("torchao::fp16_to_fp6") +@register_custom_op("torchao::fp16_to_fp6") def _(fp16_tensor): torch._check(fp16_tensor.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp16_tensor.dim()}D") torch._check(fp16_tensor.dtype is torch.float16, lambda: f"weight must be FP16, got {fp16_tensor.dtype}") @@ -74,7 +83,7 @@ def fp16act_fp6weight_linear(_in_feats: Tensor, _weights: Tensor, _scales: Tenso return torch.ops.torchao.fp16act_fp6weight_linear.default(_in_feats, _weights, _scales, splitK) -@torch.library.impl_abstract("torchao::fp16act_fp6weight_linear") +@register_custom_op("torchao::fp16act_fp6weight_linear") def _(_in_feats, _weights, _scales, splitK = 1): torch._check(_in_feats.dim() == 2, lambda: f"input should be a 2d tensor, got {_in_feats.dim()}D") torch._check(_in_feats.dtype is torch.float16, lambda: f"weight must be FP16, got {_in_feats.dtype}") @@ -95,7 +104,7 @@ def fp6_weight_dequant(fp6_tensor: Tensor, fp16_scale: Tensor) -> Tensor: return torch.ops.torchao.fp6_weight_dequant.default(fp6_tensor, fp16_scale) -@torch.library.impl_abstract("torchao::fp6_weight_dequant") +@register_custom_op("torchao::fp6_weight_dequant") def _(fp6_tensor, fp16_scale): torch._check(fp6_tensor.dim() == 2, lambda: 
f"weight should be a 2d tensor, got {fp6_tensor.dim()}D")
 torch._check(fp6_tensor.dtype is torch.int32, lambda: f"weight must be INT32, got {fp6_tensor.dtype}")
From 49755f650a4959d1382f3c2a94c79e68bff76f64 Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Thu, 23 May 2024 20:36:16 -0700
Subject: [PATCH 49/61] Print the code when the check failed (#254)

Summary:
python test/integration/test_integration.py -k test_weight_only_quant_force_mixed_mm

Test Plan:
Reviewers:
Subscribers:
Tasks:
Tags:
---
 test/integration/test_integration.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py
index 3c5414ceac..5f34b761cd 100644
--- a/test/integration/test_integration.py
+++ b/test/integration/test_integration.py
@@ -1104,7 +1104,6 @@ def test_weight_only_quant(self):
 @parameterized.expand(COMMON_DEVICE_DTYPE)
 @torch.no_grad()
 @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
- @unittest.skip("This test is flaky, we'll enable later")
 def test_weight_only_quant_force_mixed_mm(self, device, dtype):
 if device != "cuda":
 self.skipTest(f"weight_only_quant_force_mixed_mm can't be constructed on {device}")
@@ -1127,7 +1126,7 @@ def test_weight_only_quant_force_mixed_mm(self, device, dtype):
 sqnr = compute_error(y_ref, y_wo)
 self.assertGreaterEqual(sqnr, 42.75)
 if device == "cuda":
- self.assertTrue("mixed_mm" in code)
+ self.assertTrue("mixed_mm" in code, f"got code: {code}")
 @parameterized.expand(COMMON_DEVICE_DTYPE)
 @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
From 163cb93d96e411b5640ea8e0a001a88774e65892 Mon Sep 17 00:00:00 2001
From: HDCharles <39544797+HDCharles@users.noreply.github.com>
Date: Fri, 24 May 2024 00:22:33 -0400
Subject: [PATCH 50/61] fixing autoquant bug (#265)

Summary: in some model topologies you see the same weight accessed by multiple modules which caused a bug where weights would get autoquantized multiple times. Also fixed a shape issue with x_scales in some situations with new primitives.
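For illustration, a minimal sketch of the double-access pattern (hypothetical module and sizes, mirroring the new test_autoquant_double_access test below):

```
import torch

class DoubleAccess(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = torch.nn.Linear(16, 16, bias=False)
        self.lin3 = torch.nn.Linear(16, 16, bias=False)
        # both modules hold the very same Parameter object
        self.lin3.weight = self.lin1.weight

m = DoubleAccess()
assert m.lin1.weight is m.lin3.weight
# A pass that rewraps every linear's .weight would visit this Parameter
# twice; the fix makes _is_linear skip weights that are already
# AutoQuantizableLinearWeight so they only get autoquantized once.
```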
Also changed default for autoquant to be interpolation which seems to work better for torchbench benchmarking Test Plan: python test/integration/test_integration.py -k "autoquant" Reviewers: Subscribers: Tasks: Tags: --- test/integration/test_integration.py | 41 +++++++++++++++++++++++++++- torchao/quantization/autoquant.py | 4 +-- torchao/quantization/quant_api.py | 3 +- 3 files changed, 44 insertions(+), 4 deletions(-) diff --git a/test/integration/test_integration.py b/test/integration/test_integration.py index 5f34b761cd..3bc8ded793 100644 --- a/test/integration/test_integration.py +++ b/test/integration/test_integration.py @@ -61,7 +61,8 @@ AQInt8DynamicallyQuantizedLinearWeight, AQWeightOnlyQuantizedLinearWeight, AQWeightOnlyQuantizedLinearWeight2, - AQWeightOnlyQuantizedLinearWeight3 + AQWeightOnlyQuantizedLinearWeight3, + AutoQuantizableLinearWeight, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx @@ -1471,6 +1472,44 @@ def forward(self, x, y): sqnr = SQNR(out, out2) self.assertTrue(sqnr >= 30) + @parameterized.expand(combine_parameters(COMMON_DEVICE_DTYPE, + [ + (16, 128, 128), + ])) + @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "autoquant requires 2.3+.") + def test_autoquant_double_access(self, device, dtype, m, k, n): + if device != "cuda" and dtype != torch.bfloat16: + self.skipTest(f"autoquant currently does not support {device}") + if device != "cuda" or not torch.cuda.is_available(): + self.skipTest(f"autoquant currently does not support {device}") + if torch.cuda.is_available() and torch.cuda.get_device_capability() < (8, 0): + if dtype == torch.bfloat16: + self.skipTest(f"bfloat16 requires sm80+") + + class DoubleAccess(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin1 = torch.nn.Linear(k, n) + self.lin2 = torch.nn.Linear(n, k) + self.lin3 = torch.nn.Linear(k, n) + self.lin3.weight = self.lin1.weight + + def forward(self, x): + x = self.lin1(x) + x = self.lin2(x) + x = self.lin3(x) + return x + + x_in = torch.randn(m, k, device=device, dtype=dtype) + model = DoubleAccess().to(device).to(dtype) + model(x_in) + torchao.autoquant(model) + assert not isinstance(model.lin1.weight.weight, AutoQuantizableLinearWeight) + model(x_in) + + + + class TestAOTI(unittest.TestCase): @parameterized.expand( list(itertools.product(TENSOR_SUBCLASS_APIS, COMMON_DEVICES, COMMON_DTYPES)), diff --git a/torchao/quantization/autoquant.py b/torchao/quantization/autoquant.py index 4c0ae53ce8..808f7d89d3 100644 --- a/torchao/quantization/autoquant.py +++ b/torchao/quantization/autoquant.py @@ -252,7 +252,7 @@ def _autoquant_test(cls, act_mat, weight, bias, best_time, mode=["relu", None]): ) q_c_matmul=torch.compile(quantized_matmul, mode="max-autotune-no-cudagraphs") with torch.no_grad(): - res_matmul = do_autoquant_bench(q_c_matmul, x_vals_int8, x_scales, w_qtensor.int_data) + res_matmul = do_autoquant_bench(q_c_matmul, x_vals_int8, x_scales.reshape(-1,1), w_qtensor.int_data) print(f">>time: {res_matmul:0.3f}ms for {cls} matmul, to_beat: {best_time:0.3f}ms") # if the (much faster) matmul kernel is already beat, don't bother benchmarking full op @@ -384,7 +384,7 @@ def change_autoquantizable_to_quantized(model, **kwargs): torch._dynamo.reset() @torch.no_grad() -def autoquant(model, example_input=None, qtensor_class_list=DEFAULT_CLASS_LIST, filter_fn=None, mode=["relu",None], **aq_kwargs): +def autoquant(model, example_input=None, qtensor_class_list=DEFAULT_CLASS_LIST, filter_fn=None, mode=["interpolate", .85], **aq_kwargs): """ wraps model 
in AutoQuantWrapper, if example_input is provided, runs forward on it, otherwise returns the wrapped model. AutoQuantWrapper handles instances where model is torch.compiled by first performing autoquantization on the original diff --git a/torchao/quantization/quant_api.py b/torchao/quantization/quant_api.py index a5a3a2b3db..20c52aa3f0 100644 --- a/torchao/quantization/quant_api.py +++ b/torchao/quantization/quant_api.py @@ -34,7 +34,7 @@ Int4WeightOnlyGPTQQuantizer, Int4WeightOnlyQuantizer, ) -from .autoquant import autoquant +from .autoquant import autoquant, AutoQuantizableLinearWeight __all__ = [ @@ -91,6 +91,7 @@ def _is_linear(mod, *args): isinstance(mod, torch.nn.Linear) and hasattr(mod, "weight") and not isinstance(mod.weight, QuantizedLinearWeightBase) + and not isinstance(mod.weight, AutoQuantizableLinearWeight) ) From 923ff4dd18d5d70d943206d8638641cf472cafd8 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Fri, 24 May 2024 10:41:41 -0700 Subject: [PATCH 51/61] Add `quantize` (#256) Summary: This exposes the AffineQuantizedTensor and LinearActQuantizedTensor subclass as a model level API that will replace the weights of linear layers This is in preparation to replace existing tensor subclass APIs such as `change_linear_weights_to_int4_woqtensors` but currently we can't combine the two quantizers due to some problem with parametrization/nn.Parameter the error is: raise KeyError(f"attribute '{name}' already exists") KeyError: "attribute 'weight' already exists" happens in ``` lin.weight = torch.nn.Parameter(constructor(lin.weight, **copied_kwargs), requires_grad=False) ``` Test Plan: regression tests: ``` python test/quantization/test_quant_api.py ``` Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_api.py | 115 +++++++++++++++------------- torchao/quantization/quant_api.py | 50 +++++++++++- torchao/quantization/subclass.py | 35 ++------- torchao/quantization/utils.py | 49 ++++++++++++ 4 files changed, 166 insertions(+), 83 deletions(-) diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index 8cceefb0a8..f0830cf8a8 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -18,12 +18,24 @@ get_symmetric_quantization_config, ) +from torchao.quantization.subclass import ( + to_aqt, + to_laqt, + AffineQuantizedTensor, + LinearActQuantizedTensor, +) +from torchao.quantization.quant_primitives import ( + MappingType, + ZeroPointDomain, +) + from torchao.quantization.quant_api import ( _replace_with_custom_fn_if_matches_filter, apply_dynamic_quant, apply_weight_only_int8_quant, Quantizer, TwoStepQuantizer, + quantize, ) from torchao.quantization.utils import ( TORCH_VERSION_AFTER_2_3, @@ -32,6 +44,7 @@ from pathlib import Path from sentencepiece import SentencePieceProcessor from model import Transformer, prepare_inputs_for_model +import copy def dynamic_quant(model, example_inputs): @@ -92,8 +105,8 @@ def __init__(self, m=64, n=32, k=64): self.linear1 = torch.nn.Linear(m, n, bias=False).to(torch.float) self.linear2 = torch.nn.Linear(n, k, bias=False).to(torch.float) - def example_inputs(self): - return (torch.randn(1, self.linear1.in_features).to(torch.float),) + def example_inputs(self, batch_size=1): + return (torch.randn(batch_size, self.linear1.in_features).to(torch.float),) def forward(self, x): x = self.linear1(x) @@ -395,13 +408,6 @@ def test_eval_wrapper(self): # TODO: move to a separate test file @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") def 
test_quantized_tensor_subclass_8da4w(self): - from torchao.quantization.subclass import ( - AffineQuantizedTensor, - LinearActQuantizedTensor, - ) - from torchao.quantization.quant_primitives import MappingType - import copy - # weight settings groupsize = 32 mapping_type = MappingType.SYMMETRIC @@ -423,20 +429,26 @@ def get_per_token_block_size(x): # input settings input_mapping_type = MappingType.ASYMMETRIC input_target_dtype = torch.int8 - input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) - - def dynamic_quant(linear): - # note: order is important - linear.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(linear.weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps), requires_grad=False) - linear.weight = torch.nn.Parameter(LinearActQuantizedTensor.from_float(linear.weight, input_quant_func), requires_grad=False) + input_quant_func = lambda x: to_aqt(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) m = ToyLinearModel().eval() m_copy = copy.deepcopy(m) example_inputs = m.example_inputs() - dynamic_quant(m.linear1) - dynamic_quant(m.linear2) + + def apply_weight_quant(weight): + return to_aqt(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps) + + def apply_act_quant(weight): + return to_laqt(weight, input_quant_func) + + # note: order is important + m = quantize(m, apply_weight_quant) + m = quantize(m, apply_act_quant) + assert isinstance(m.linear1.weight, LinearActQuantizedTensor) assert isinstance(m.linear2.weight, LinearActQuantizedTensor) + assert isinstance(m.linear1.weight.original_weight_tensor, AffineQuantizedTensor) + assert isinstance(m.linear2.weight.original_weight_tensor, AffineQuantizedTensor) # reference from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer @@ -454,11 +466,6 @@ def dynamic_quant(linear): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_quantized_tensor_subclass_int4(self): - from torchao.quantization.subclass import AffineQuantizedTensor - from torchao.quantization.quant_primitives import MappingType - from torchao.quantization.quant_primitives import ZeroPointDomain - import copy - # weight settings groupsize = 32 mapping_type = MappingType.ASYMMETRIC @@ -469,22 +476,17 @@ def test_quantized_tensor_subclass_int4(self): eps = 1e-6 preserve_zero = False zero_point_dtype = torch.bfloat16 + zero_point_domain = ZeroPointDomain.FLOAT # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") m_copy = copy.deepcopy(m) example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs())) - def to_quantized(weight): - return AffineQuantizedTensor.from_float( - weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, - zero_point_dtype=zero_point_dtype, - preserve_zero=preserve_zero, - zero_point_domain=ZeroPointDomain.FLOAT, - ) + def apply_weight_quant(weight): + return to_aqt(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=zero_point_domain) - m.linear1.weight = torch.nn.Parameter(to_quantized(m.linear1.weight), requires_grad=False) - m.linear2.weight = torch.nn.Parameter(to_quantized(m.linear2.weight), requires_grad=False) + m = quantize(m, apply_weight_quant) assert 
isinstance(m.linear1.weight, AffineQuantizedTensor) assert isinstance(m.linear2.weight, AffineQuantizedTensor) @@ -501,10 +503,6 @@ def to_quantized(weight): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_quantized_tensor_subclass_int8(self): - from torchao.quantization.subclass import AffineQuantizedTensor - from torchao.quantization.quant_primitives import MappingType - import copy - # weight settings mapping_type = MappingType.SYMMETRIC target_dtype = torch.int8 @@ -515,12 +513,12 @@ def test_quantized_tensor_subclass_int8(self): m_copy = copy.deepcopy(m) example_inputs = tuple(map(lambda x: x.to(torch.bfloat16), m.example_inputs())) - def to_quantized(weight): + def apply_weight_quant(weight): block_size = (1, weight.shape[1]) - return AffineQuantizedTensor.from_float(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) + return to_aqt(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) + + m = quantize(m, apply_weight_quant) - m.linear1.weight = torch.nn.Parameter(to_quantized(m.linear1.weight), requires_grad=False) - m.linear2.weight = torch.nn.Parameter(to_quantized(m.linear2.weight), requires_grad=False) assert isinstance(m.linear1.weight, AffineQuantizedTensor) assert isinstance(m.linear2.weight, AffineQuantizedTensor) @@ -537,12 +535,6 @@ def to_quantized(weight): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_quantized_tensor_subclass_int8_dyn_quant(self): - from torchao.quantization.subclass import AffineQuantizedTensor - from torchao.quantization.subclass import LinearActQuantizedTensor - from torchao.quantization.quant_primitives import MappingType - from torchao.quantization.quant_primitives import ZeroPointDomain - import copy - # weight settings mapping_type = MappingType.SYMMETRIC def get_weight_block_size(x): @@ -563,20 +555,24 @@ def get_per_token_block_size(x): input_eps = 1e-5 input_quant_min = -127 input_quant_max = 127 - input_quant_func = lambda x: AffineQuantizedTensor.from_float(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None) + input_quant_func = lambda x: to_aqt(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None) # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") m_copy = copy.deepcopy(m) - example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs())) + # setting batch_size to 20 to be compatible with the kernel + example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs(batch_size=20))) + + def apply_weight_quant(weight): + block_size = get_weight_block_size(weight) + return to_aqt(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) - def dynamic_quant(linear): - # note: order is important - linear.weight = torch.nn.Parameter(AffineQuantizedTensor.from_float(linear.weight, mapping_type, get_weight_block_size(linear.weight), target_dtype, eps=eps, zero_point_dtype=zero_point_dtype), requires_grad=False) - 
linear.weight = torch.nn.Parameter(LinearActQuantizedTensor.from_float(linear.weight, input_quant_func), requires_grad=False)
+
+ def apply_act_quant(weight):
+ return to_laqt(weight, input_quant_func)
+
+ m = quantize(m, apply_weight_quant)
+ m = quantize(m, apply_act_quant)
- dynamic_quant(m.linear1)
- dynamic_quant(m.linear2)
 assert isinstance(m.linear1.weight, LinearActQuantizedTensor)
 assert isinstance(m.linear2.weight, LinearActQuantizedTensor)
 assert isinstance(m.linear1.weight.original_weight_tensor, AffineQuantizedTensor)
@@ -591,6 +587,19 @@ def dynamic_quant(linear):
 self.assertTrue(torch.equal(res, ref))
+
+ # workaround for export path
+ from torchao.quantization.utils import unwrap_tensor_subclass
+ m_unwrapped = unwrap_tensor_subclass(m)
+
+ m = torch.export.export(m_unwrapped, example_inputs).module()
+ exported_model_res = m(*example_inputs)
+
+ self.assertTrue(torch.equal(exported_model_res, ref))
+
+ # make sure it compiles
+ torch._export.aot_compile(m_unwrapped, example_inputs)
+
+
 if __name__ == "__main__":
 unittest.main()
diff --git a/torchao/quantization/quant_api.py b/torchao/quantization/quant_api.py
index 20c52aa3f0..39a977dd00 100644
--- a/torchao/quantization/quant_api.py
+++ b/torchao/quantization/quant_api.py
@@ -18,6 +18,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from typing import Any, Callable
 from .dynamic_quant import DynamicallyPerAxisQuantizedLinear
 from .utils import TORCH_VERSION_AFTER_2_3, TORCH_VERSION_AFTER_2_4
@@ -48,7 +49,8 @@
 "TwoStepQuantizer",
 "Int4WeightOnlyGPTQQuantizer",
 "Int4WeightOnlyQuantizer",
- "autoquant"
+ "quantize",
+ "autoquant",
 ]
 if TORCH_VERSION_AFTER_2_3:
@@ -215,3 +217,49 @@ def replace_conv2d_1x1(conv):
 _replace_with_custom_fn_if_matches_filter(
 model, replace_conv2d_1x1, filter_fn=filter_fn
 )
+
+
+def _get_linear_subclass_inserter(constructor):
+ def insert_subclass(lin):
+ lin.weight = torch.nn.Parameter(constructor(lin.weight), requires_grad=False)
+ return lin
+
+ return insert_subclass
+
+def quantize(model: torch.nn.Module, apply_tensor_subclass: Callable[[torch.Tensor], torch.Tensor], filter_fn=None) -> torch.nn.Module:
+ """Convert the weight of linear modules in the model with `apply_tensor_subclass`
+
+ Args:
+ model: input model
+ apply_tensor_subclass (Callable[[torch.Tensor], torch.Tensor]): function that converts a floating point Tensor to a (quantized) tensor subclass instance
+ filter_fn: used to filter out the modules that we don't want to apply the tensor subclass to
+
+ Example::
+
+ # weight settings
+ groupsize = 32
+ mapping_type = MappingType.ASYMMETRIC
+ block_size = (1, groupsize)
+ target_dtype = torch.int32
+ quant_min = 0
+ quant_max = 15
+ eps = 1e-6
+ preserve_zero = False
+ zero_point_dtype = torch.bfloat16
+ zero_point_domain = ZeroPointDomain.FLOAT
+
+ apply_weight_quant = lambda x: to_aqt(x, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=zero_point_domain)
+
+ # apply to modules under block0 submodule
+ def filter_fn(module, fqn):
+ return fqn == "block0"
+
+ m = MyModel(...)
+ m = quantize(m, apply_weight_quant, filter_fn)
+ """
+ _replace_with_custom_fn_if_matches_filter(
+ model,
+ _get_linear_subclass_inserter(apply_tensor_subclass),
+ _is_linear if filter_fn is None else filter_fn,
+ )
+ return model
diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py
index 8d0af8b369..6e844530d4 100644
--- a/torchao/quantization/subclass.py
+++ b/torchao/quantization/subclass.py
@@ -35,6 +35,7 @@
 "Int8WeightOnlyQuantizedLinearWeight",
 "Int4WeightOnlyQuantizedLinearWeight",
 "AffineQuantizedTensor",
+ "LinearActQuantizedTensor",
 ]
@@ -266,7 +267,6 @@ def __new__(cls, int_data, q_scales, transposed, shape, dtype=None, **kwargs):
 return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]
 def __init__(self, int_data, q_scales, transposed, shape, dtype=None, **kwargs):
-
 self.q_scales = q_scales
 super().__init__(int_data, transposed)
@@ -629,32 +629,6 @@ def to_qtensor_components(cls, input_float, groupsize=128, inner_k_tiles=8):
 int_data = aten._convert_weight_to_int4pack(input_int4x8, inner_k_tiles)
 return int_data, scales_and_zeros, False, groupsize, inner_k_tiles
-def to_aqt(
- input_float,
- mapping_type,
- block_size,
- target_dtype,
- quant_min = None,
- quant_max = None,
- eps = None,
- scale_dtype = None,
- zero_point_dtype = None,
- preserve_zero = True,
- zero_point_domain = ZeroPointDomain.INT,
-):
- return AffineQuantizedTensor.from_float(
- input_float,
- mapping_type,
- block_size,
- target_dtype,
- quant_min=quant_min,
- quant_max=quant_max,
- eps=eps,
- scale_dtype=scale_dtype,
- zero_point_dtype=zero_point_dtype,
- preserve_zero=preserve_zero,
- zero_point_domain=zero_point_domain
- )
 # TODO: merge with nf4 implements decorator
 # aten op to their __torch_dispatch__ implementations for the tensor subclass
@@ -777,7 +751,7 @@ def dequantize(self, output_dtype=None):
 return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, self.zero_point_domain, output_dtype=output_dtype)
 def __tensor_flatten__(self):
- return ["int_data", "scales", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.dtype]
+ return ["int_data", "scale", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.dtype]
 @classmethod
 def __tensor_unflatten__(
@@ -1091,7 +1065,7 @@ def __tensor_unflatten__(
 cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride
 ):
 original_weight_tensor = tensor_data_dict["original_weight_tensor"]
- input_quant_func = tensor_attributes
+ input_quant_func, = tensor_attributes
 return cls(
 original_weight_tensor,
 input_quant_func,
@@ -1176,3 +1150,6 @@ def __torch_dispatch__(cls, func, types, args, kwargs):
 raise NotImplementedError(
 f"LinearActQuantizedTensor dispatch: attempting to run {func}, this is not supported"
 )
+
+to_aqt = AffineQuantizedTensor.from_float
+to_laqt = LinearActQuantizedTensor.from_float
diff --git a/torchao/quantization/utils.py b/torchao/quantization/utils.py
index 6ee059f288..948c1357c8 100644
--- a/torchao/quantization/utils.py
+++ b/torchao/quantization/utils.py
@@ -10,6 +10,7 @@
 from packaging import version
 from functools import reduce
 from math import gcd
+import torch.nn.utils.parametrize as parametrize
 __all__ = [
@@ -17,6 +18,7 @@
 "compute_error",
 "_apply_logging_hook",
 "get_model_size_in_bytes",
+ "unwrap_tensor_subclass",
 "TORCH_VERSION_AFTER_2_3",
 ]
@@ -88,6 +90,53 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
 return rs
+class UnwrapTensorSubclass(torch.nn.Module):
+ def forward(self, *tensors):
+ todo = list(tensors)
+ for tp, meta, inner_tensors in reversed(self.rebuild_stack):
+ nb_tensor = len(inner_tensors)
+ inner_tensors = {a: b for a, b in zip(inner_tensors, todo[-nb_tensor:])}
+ todo = todo[nb_tensor:]
+ rebuilt = tp.__tensor_unflatten__(inner_tensors, meta, None, None)
+ todo.append(rebuilt)
+
+ assert len(todo) == 1
+ return todo[0]
+
+ def right_inverse(self, tensor):
+ assert type(tensor) is not torch.Tensor
+ rebuild_stack = []
+ plain_tensors = []
+ todo = [tensor]
+ while todo:
+ obj = todo.pop()
+ inner_tensors, metadata = obj.__tensor_flatten__()
+ rebuild_stack.append((type(obj), metadata, inner_tensors))
+ for attr_name in inner_tensors:
+ val = getattr(obj, attr_name)
+ if type(val) is torch.Tensor:
+ plain_tensors.append(val)
+ else:
+ assert isinstance(val, torch.Tensor)
+ todo.append(val)
+
+ self.rebuild_stack = rebuild_stack
+
+ return plain_tensors
+
+def unwrap_tensor_subclass(model, filter_fn=None):
+ for name, child in model.named_children():
+ if (
+ isinstance(child, torch.nn.Linear) and
+ hasattr(child, "weight") and
+ type(child.weight) is not torch.Tensor and
+ isinstance(child.weight, torch.Tensor)
+ ):
+ parametrize.register_parametrization(child, "weight", UnwrapTensorSubclass())
+ unwrap_tensor_subclass(child)
+ return model
+
+
 # https://discuss.pytorch.org/t/finding-model-size/130275
From bc46bdc990a76e2873ca9ae27fdf5e60908b08f5 Mon Sep 17 00:00:00 2001
From: Jesse Cai
Date: Fri, 24 May 2024 14:04:24 -0400
Subject: [PATCH 52/61] add static folder for images + fix links (#271)

---
 docs/static/pruning_ecosystem_diagram.png | Bin 0 -> 739206 bytes
 docs/static/pruning_flow.png | Bin 0 -> 236902 bytes
 torchao/sparsity/README.md | 4 ++--
 3 files changed, 2 insertions(+), 2 deletions(-)
 create mode 100644 docs/static/pruning_ecosystem_diagram.png
 create mode 100644 docs/static/pruning_flow.png

diff --git a/docs/static/pruning_ecosystem_diagram.png b/docs/static/pruning_ecosystem_diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6562cdf201e73736f06d6e80c13e371ce0a4792
GIT binary patch
literal 739206
[... base85-encoded binary PNG data omitted ...]
z*G-~jCj8fNXmKOnw-N`EaSVG%(WEVyU}ed+`~Kca$iEw1iUD#GY3R#UO-4YWJUI~W zP5gWZEvi#8vnpM^JFq#VS9=Xq^^uG{hA7{3;Do`uihjJ&s(-~qUL>kc0LTdDk#v=jT?#gSc%{hmMf;_iMWxZt zyp(FsS+EGz)#aIUJtB=JO{_GnBomyk)6E7vs%~49x&;1g)!&w-Tj!_FjX(PXc?U9j z^CUiF7$_cA@{UxGSURWZ^e_09eiQ{--KsYlpa! zG1H}lNqM!T<*ofwGBGYWf;%O?Rm6-W0hx^?M*C)6K!{o}o(f}42~HdmoRs1M1uZZI zg1>|+RF#b+lPSV*Y|U}j?>#5AnN}l3@`N{@k+93?rn5aX%5I396`QI|&W1?^?i^&H zN<{0tYJi8Bc4nynm>ZSAtDsCzJ+T`BF>dpXB-Io}hRWwNvBQcg3q!ukp9?VF64Rv^ zWQMYR8CZR}#g08P+EpbpgDq|+3J*F^Z^Bi}%%acV0(0yo99KAr*$9{#6(T&D9EiiT zOJl!)3(H5Wt*sXg40TQ8urEjwQiUV@<0Hn34cn7unq`vPfk#;50fo3%KUkE2_0_&u z?cD@8P|RNgQpk0p2m$x8=TEeUiatQZlQL^7Pth~JDwq_U3uY)3*g4~DSbohbI71jD z3Q1vq$~Icb8=LFrpv%{By=-$U^>1Q4o%E!4!fUik6m7;YeKmAIAjP02+VLmBNmFq= zQCW$Ik$Q6SGr5X7@7UrQ6SmgiErH6N39Y6K-tLEZ5dLgDGQe|D>gv0Z1gX=lvR`d8 zw0uHs*Bv~eFGJr~M{2W2ze7^-b^Y{(z_3hLm&p_^W*NAct5pFpPua7S0b-T|`m4&D zUWthvU4B|KCeM1K!$Vzb#$ip7)ki*x!_l7 zJ>cBR@$~eJGBO_uLj1Qm9^^&gEKzWtt_3fM5&gQksp;Ubex=S3b~^*K*Z6Oj^u8`6 zUHV9J-n{ZKvg~)PFAS@mIUFMA^vNl0itv43BV>qNM~}eI!H58>v-MHeL7x8nq;K?N zFzD4Q^VZf@5RaHu^|^M2LaHr;X-cqB4y&dG#B5WN#MG{b=YFF5T3dVUx^z;&-vUzJ z^AMjA(W7^lr18pdP>7lHUte`ig>bui64nzSpcrxy0>(1kHK$*kGh!3P;%?U+Phnk< z?a~Nx`m?7sR$^kFf<|1HTi7|juuz7-u}dLW3M}o z${TM=^acb6!?4=;&3Ds)4beihf#JRkpj9u0L{(76$Oky$dXqVGJ$rb|7L;6-y&N%n zcW|`i1L6YNVODCqmD^*}#}X`p(1!13sy++xLZZP8?exSldHh6ME>oMF$yevG{ozo? zNQ&^plDb8b1k;Pvn3KeLw(>g~#SQm241{7qx!g4KvX;aePw(0ElFsSzabtb`thxY0 zQi?EZ?o+%fJTaKB++$y#5{M6jSoyCk_w}j*;-lZ)<=oBrHsI|LIa^=;r3Zhd2K>q) zKVH&8WY8S%V}<+fTTiRO!^50fFleHFi|ZVf2nFQlP2Mm`UL9UZU949)Y0n2}nc8mL zSN8ZE6v1#=7m>)z)ZVc=G$!?bOvT6s-*Mf~u*GV|NjI~V(+k5=JXF|ObQVpEWS5h(OQMxhd)^v#C zF8~aIo4AAW?;Zn@MvD^gVHG%<1uVd?5keL??Wb>(la;kav%d?W+W-lG%r{(QGSWDW ziX9Z?Eru~Zv9N}+fS;k6uH!4;y{q+Jhw(UcIytSM3XhjOUD7D(`BDP7kpVodOfN?h zbPVq3Bi=G(QW<5*24-}|Rn8DblX1izFv>Y5$q+reMoRs_8DxrH67O8j&Qdk*(B2M` zQYZ`LXok)Nw@2rW=5N4oMq1j49i5GhZFY9{?{^nZG0#&T(C$(;)E5q`M8EO&sH zS0kpL_qg-VAeapLo-bhfoA45jveYOLkT(=nXK?ts|e|<`o z0!{bm7Z*HDcjy;^K@QjD^;OCticX>;BO0(h`S0wU`sjBZ=`39%L`B6cX6$jV>TI^{ zZLF>9($k;E$mW;oe=OicBo79n7kJ05%?u4V2`g+1eu#ExW99rw%Si zGT2nRFmKlc+$|$DPoDQv#gAyQs9Da*m!S;Ul(FepNi-OLk!U%lxD=d=M3=8;Z*;ge znUw)0JTx{&75@R2t(y&9mrK8b9N=zOH4Lev>FgJ)tt=C8Wpjd?#EMV*!a_@PcuKl zYG(|p4Ij@^>pYGPVtPyfYc7!0)R#7jfk%`WzV9V8h~$A!daO_m8udE1NGCu-V)hRY zNbcvoO3f?$Mh}zP83Quxn#&)SLp`VN=xWmnqem_Tg;PzQk|u`Vjya0|-AzkHZq^Vn14GKbF4d<>hshv#A;xQ5N>S zhP>kAV`KMt%$xd*1dAc~bpyP15Z*tX84d?)BAf`(SChLn!$T%RwvI!$;f!SgW&`29 zMRvYI=0C%N3kFS6FuTE_+%*IgS4cF^3|}`K|F(U`f9{XUq1wn!ENlAlEz=+A|3q^m zLc;4cttQ<@pK+pyjU9Tlo54ka+f13am?XHwWeoVWDNQV3>xGYpcz^2ar?vf*3g`*Y z`KN25f`07Kp5z0@3471y{g21{i(1i00Stn5P_-C7*?@(BlkRlpB$gGl@z^4|vs>#U z>gOI{w(Q+UYU>Jen6#?aa?6(?sC8V7aHNhmwui(m8@+Xjd8?vPX!HgE=76wJf;N-z zv+?rPf-~?KUO1og)^UlNydTt2@)~oWktr)}T*NtAGDmF|E-P&E{#lUDA^BJ5tPwBX zsvlk01qt;Wze(GAWw5-f6$L9WUAAQmQEDNOn&&0FX5ou5S5bgEKwW`br z5caMQb^A%!NjU1`>sqHLW^W%tE>ht-B_bQRVT5I-XxcOIbN4%Tg34UI$L*oTN~GxX zoa&!y9cLX4x@9~c-kay5Wt$efv46XK8xypeKwl>QZ(eBRgQvy!*GSOTe3bT*;*zPseJ@X!XTB?5R8g=aTXc~1>0=JhtEDOacfJGKfYv}*` zG=R8D286vIZTJ9jDPl^-21sPvv91m?o08r>hL_emVOmA;6sDBI4**1t`UHlmGWfs-(x-vVZHQXK!DBH zl{pgzl_>+Ruxz=rBnJ)5fv}v4g^Rne+Mjk2JEU{GlX($0Jb&WaOA<{JnS7g}Z&@aiSbr-$ zI}gb_Ji2vWn0!{P&yHPZGs6*=?~pC4p^)<#kdg}en}~nX1h|WU#sEEQ{|Pk7FIQtK z7#NCdeN6^9R;a5=ab@j3GFWywE{&QNADcjY-#1W8O~(+%Ib-Vnw0Nfj_r=N3&+qch zP#R82UC3BBNwDhfHFQCC>>XCZEe`crLSBpRb$q%~ew4@%8b;rx5_z@1z?r885U<5q zxhXjTwM&Kjt<;?zQ9zp1bkI}Ybi@!Akqk+5y%oaqT7aENclAO2@GG~Qp$zZE#*L}&S zIsd5~BwQ7cFr}ucP5ViUdwVarEc9<)rIr!EGV{Ee@xXEYaQBw1tLa7R1qZU#D<| z8?bD#1@?X0EL)t=(0-%iqd$vtR8ohee+UQWF?}1l#36gN!|+n~@N>PX|DG__5{Kg` 
zYM)IuU|%Q^ltr9#GP5hAep=lfth?RaPbg(@lvaOzorPkW?6{O*y3QRaDA#0gqNgO{ zFZXT~j&WA!&^;w_eACxA_)@b&@L@Fou-lV5cL!HbDN`@oH0xy8GG59Hu%YH!%7J-- zZ`8d2;#0j*e#skJNIV?r$XyTUAcM#d0;UGS!ca28&L1~z_zxE{t`4W=r>W=%!9=B% zRcF?i;ie>^o&i?B6i4ndzy<1h|Jli)IGMnp#N}UR`1bcbh#GZDqdHdp6VJP>_J0h_ zMIlkTZ~bzzv9@)j^%p#K0F$U$WoH4(Fgl`W`A^LJ%Q9T>0n2EF?7jrll9(Y;k-^T} zd%k=Hm|yasGWzKt5T!&6#w)IrBqB_dSa}soHu%am?37gyz6@x$)H23REQ+%hhCph~ zmNazUy34)F2e5~lJ^?NnzyUTJsm zI1+`tz3MyIf`SXWYE!zdzlND;kVJEo>!2gW9>2WTC6Bdc{2w3(Jk3TDnvucgWWL}# z{bsHPrCOEbXY4{-p|fyX(K3#Q$9xbHYG$PPYbhQ(T8IK%-xSD|dr1CcKftnQeF6Ee z_}Uc`#6UG@p+Eb82>l?HKP@g`va;CVKx!`nnS0$@AbCN;poi{*CAO8^?{&!a_TL>)CYL^J#5;NK%B*Ve(|r zYPX?7l&4_txL3fI;5Fx42^ATmEi#u?Rq>ehg!49k5}V{0%Ed^`6g+5GeL zXRZZxb&Db#_gPjs;j*Tt0TxzPe#0I$6S$ga(ZH<@Po5sAU9JMJtB40)d@_SJc1@e~ zbABn?p!wn3Xt~I3)8|hD2NjjS?*3=3@Z~xq9=ow^Q?-MDKzGsclXD&krEN%w5n&0>IkeK+Qj1 z=M@6kdK@p+xOz{oM@7=Ju)Y{8sWRef7<%yDcyCG>9qC*|h@XmYprr-5z4hMT*@5)- z%9a)ve^Nc@wE-o6%phFd#;I6{K$4oCoN-7!ikp{Vd&pvwwk;>BHv)HdR)${9(QQNQ zmUY$q0C~M)4D>0LbniIZ^@{mz$(Eurzk8(NbJ1_VfX(<=M>%}79DNn}H<)P#3^Jg( zwqcJIvGgtAsi!tef5{lJ;-zJ0SD~jF>NVX!IhKy9vlXfz8v|unDkXJfhCMbepNlVG z43`Q0W8AhRh|i?1>KE^03wFDyId$ax`jw9yswd@y${KS=09c(h-6^h-Kno#*YXF&! zg86?}O=bz;HbOA`<4q*AknZ&r*HNbL{oa7S57(#a?Bd|f5GPh6T;w(RAj;~*jBWMW?V{6PJhon-Z_iQu?`$gu!0rN0gM1>&fb(WyUJjYWrB)H3XbdEpb2mZl%n zK&)nFBwOHPGFR8yv*h0K98}1NHZI%{QQbC~|NS;CgdOgl*faE#NrK!M9gmkqo81r7 z_4l$WF>$xqNsDVgv~&m_lQX`k{Tor)q9MAjbCR5Eq-1x9nWOGuxHq(!F&YY4F&cRP zL<|Z{d9sYhULY9>y<`9e3S!qEK7GP(__bVlbK?b=Pi0{t!$PAAqXxD@!)Qrz8WYfs z<8$?D$^cm-9SBdXN@}}N&}kYjK~YCPf&ZKZAho%2qyArv{VXkV^i2(MwN(sh(g@Zz){x_}$heiTPCB@IU<`{YzQ5*Oh{d^r~KB&7!Mbd+8hWv`tZf(BQ8m@y7ZS*gVQ4uB_c58ncP`2d` zl8-EeXO>K|^!2AJJ&rBP(guK}3t-#0Ub~u@bc(_5UG9nkZW(?4?0h<0Ze1A361EHg z4$U+5ITa+1>CrIj!2^Jje9`+?z#K4~jtJokjRxq5$k#$ypre4>KY0&p8<-%wS0MG0 zI-{D2fjpV5D(rfYWJFV}D2w;-ib)U9mk=&ThU z8dweAtGNS$`EPf%IY9h%tOyOL$S{MW(G<;biP?fB%^eZV1=D7#Cr1i*Rcynu>jKk+ciKc z(V=1OSoGg91H@7*q4&>*9K#RFOnnYWEpcZ@rY-8J|Vf8jCW!Rv3AOH?$VG%`8!d8 z_+k{i6cBo zO-%)E^)4^_J_I2BY-wOkjPJ*7;1d-@F)|zeH)rN|xBC@`ds<)hsd2TL-aF(*3ke=O zjN0e^J5*u&0TFA2ZAe82Bxr(a9PZLW|B(s;a6i zYGsAP#X_GZ^_&)ib+J*IHWS7bq~C;0JQFPTl@aU z3jX*xnCBs6YHCW`j$Lf#FJ}rB!uu8ojP)G&;J_QUTmW(uC@TOiS}KF-Eub((bnxKh zH1VDmLWu@YssiURqiv7VVz=wT7u(jhKgNu5wH7u@GpyA+y0!UKkmxX?5Ca$O<^Dp=V(M3S?!@G+$cKM8pA+kj^=cJq3jt z@YR0l9$m?I-6Ek~#a6stH-59mk1>YCO8U_(LEfPyE;;1TFf8`kaZ@(dLf8!qP5Zx)rq7k0buxC`st^v*&EqAE= z(hTGtb@YHR>+HPM^^ifLKs`p2O`GRy&9B84;wqV#u)(r?yNX)AI{Vw(Vz~XNs04OI z($Udf-ozo?{n2S3erjzC(n5-u{zfj|h1D_1SV;FzJ_-nYI_q_@7Q}?cVf-MpXM*l> z{?pRsNU^ewm3^JX!MwClKEdoIa4f8}x~pV~FNaN|UhmmW3*pNf@u8Pii$9h$H+8TP z>5;1(F`srhZewW+P&f%!Bu~w~*)sGI)bnY1^gQIQpBiv-+sjpxS?xh{WWsYnCHV%y zc`Fb`w_Hn#^_0CIp-X-C%+X~?(l;X_-;|2K>YB%lEv~#(-Kgv(KSvO4bgvDk7KvlX z`Ta7c%<8+i{luA-HxA3KML^UYR^J7JyC?E9t*(cyC91o#iC4{HU+RRD5C+2tNLVU5 zQ$qn9@0+c7!~DCc&CQ2OXAaJVNfD2W z&LFXfTJY%P7nMHS*N6=Q*M{!K(`T3pHtgiJS!)V+b2t147cQ1;!7Fi#f1AU!#{?p__;9^%jmlkCI7HK{V{j)^u+??MKE$o;KXP6TYl|lXb+vTp z*(vYVgQ@1&S>-FSnK~~tHV4!8Lr`9J;70#FT0l#zf6i|`2<)EoLM~u&T0Dh)GrmvU zf4+3iduWC;BxmK#$>pT^+)btx>OIzV9#9kGK_07|RMkk&fh0BOxi`_-qh21@+aDn8 zh&R{(epA*+7xUUr!Wkc5040)CwOTbLldQ+tia8@9iK(6VBCK}22|Q3ekIa;{O~qbO zqt!yd(S)s`u6-ySdrzxd9E7O?)sp_B+p(p}3*voc;!>>r))?KQCDLZJSy#_j>(H9j z4`0gz5#`>RZFS}Y7g6my0qd!$3k`LB8-Sc6iAkx@y74{bLS<5QQOHPtR@K-@CJ@N^ zjP657jG=GSr3O|b!Tc(+=iw$-iPV}u(To9oMQge@TRxyw6unxF!|hLjaX4qiGdVEx))_31a#UCwc4bxyleueFU$SqTP$&fmEZHXNIVLz0t$ z6-E;{Za6oPr6R$iP1H?jA;oB8^6_`|GxqagZ&&8CiW8T<#%pnZup;TfNTGnUU$iG` z$ey*Hrur*!R}ZuFzvIfhf{o)VttbwP@x)JGxT0=qdYX$NpSeD8wB+Njl>0c?Td>IbkiCv 
z!~XDuMyo**tHrf!-4UIoF*#Qj)$?LZDvdpX5&CGvn=&iWwENW^9%85D=8D}-n zMx?nCtoO3c`)vOT@%|B0s+bBJl`T~6ab)AW1Oh(T>%dBS@`e!2egYX)Iv61)l*Vye z8Zn{I!yTW0n2aK)BURe)pSc^;!^_)Gp#6h`nz}lW9^IZO zdBv0(E&gm1JP&TND@~7rYA^yV7LLcjA;cOER#Pg-{+UojMGDp8FMci;<$m6$;TNNQ zwl7o>B61o~KKfTO+25ZM{fP%OmjD4vrr}hKKm4fz|Kp?b+x57dQ^WXOClU4pMhe}H zkvK#XI~BRWu20aNXAvZQff@sz=bR}|?B1+-dm(SsJR3C=cHY+F=;Lj)i^OM6!QU96 zc)RtDyYDM$l|g{Z281F(pJLzqYYbDf_;f~>C$HOcusqrZ^gRW+j!24|DZ8Nm@af0t zuiNF*y@lA{u(*GZo(YAJ{Z8#@;2S(?F?_F>N+AGB_`T#d;C*L$Hkxr{?ewmR_T(OU zwu3td=}?s>wel%>^%k_D)qgLt_4JIp6b!DBMnuQ}B3lUlYzm@(zz0^;nFm(}Q>j+X zvh7|*68Ysi>D(29++cNSuHxvQYWx6hey-GmA2jt6n!&q_Ho`_^$3R_W^1w=B>p$gw zcvQ7D4RTQUTD9%5zJ0M>;mr*?>-VPCs~y^*k5M$Ni>^W`jrHcAZV}uX&Idd*4)Qf` zLi>g1;d@0r=xE`oZ?#f#t**;9na!R+(2I2IQt%Jg4$jf8&>4f=#ClthscF8{^!aMI zxg4i1ZCLA<8i4@M4Y{)N*6rL<7Z5K3PK_fNZ-e9LHiN~c_Ggvy!noIf&Si@kF)@05 z#jn?C8BE=^9H@`Oi#BTR__GbjPf?#9j2tdpDvf^eo8z^tF2G<-p3xx#%EM+%ejA;t zKQ+Rh1Q@Pw=Vz=I$D))Kuvn{C;z^%gAN7zrvZz9KZ(8Dp&Fz8=duIi3q)vV9L~NC! zAk8Y9-6&y4BK{)D)#Hts8EiUnE3qy7+n(?IfG)313B`9POTUL^ZE@O@&Qp4^g^#HP zPETvd1y2mb@v(L6yk@P}X-Z`u7_?Kh!I__W23&to@1JA+6tlY&vd6?b>E+?h{;aHn zzJoETOWCM&U7Sw)8j(VH0}nZDiQ?F%EOwRq zH*)7--`>vLQE&Ijl5h-nt67r~?{15dsc&3`DzqXOi{N39T-%9?H-VAwd6ZP>a2%p* zTfGj}ry9Z}IWFbqQikx#iNpf#aKVT=iViJ~1(()79&Oq9a1`#@#b~E(*;slPF@8)C z(od{g9R$;HiP+QdvNl>|_77thhHc_er<=uIu~&Ii6vy=a7(4XOxhw`Nb@{{NoUEUI zb5!&+18wXnkKes?I~$OwK?fqfrd_0rxV>1$qGYONNJ`lKXoHb$qnvj!45hDi}hZbj3Eub7=cJ!*bB}*u~n2bzxLQJsqiY z8uH;^S|9raV-7uu`IDN`4{CDu$A*nb^t&b_58h zYmSHptW2YpX-xc zKQE6?u|{6wb93Spbd3(*7m-ty#(^C{Snw{zfKN{<2&@GpiIk*f{fIXF*wyi4LM=^flCmW>71KT_T{9L^ ziEE1ZvWEFxhVjSHMuW4P!T^$u5v$naUNGg|a&N;ciD#qiVQp7v8&$ZBTV<$y@4k(4 zA3Tj!pnmdWX2x=Ee!aJunuH!x!jJw*&M&}@Yf8zzUhTFndWaV^1& zWL4d5Fso#r0Q+G)ycxBbb<9!BP%jctOZI?Ky8yp1HtLCg#?jrZ#nEF?ZD^8v1F>>! z#z0n{X|&=(tE{bFs7ljWl6l?(Y~gA!2c;vY^) ziS~SVR)iByy1{|s^04b4Pus#<;@X=ZC6&Bp&#soNVz$MG-=X8Ozu@Jl&_ngeDc=fh z^@5CF)@qpK6%VnG3kxN=6QWSBq(kJ=xPGpBd=y1K*H3#V7^{$)MCo?4_)*}aD&8@! 
z%J+jL)XB?jfH%=DG14QR%OLr~Ql%y0TYS#HY4~HYs=iIQubGr{ty}2J+QG7ra7*<- zI=8V@QGUg-(mcDOHI`9TC@TW86UF&;rESWDi~$zGkjZf`^4M1 zI2F22W(W~G(8I<&=d*X&zKdeBI5YKPFOAQ(*G4q3WZkN6fSgS~Q`WW-v2>tVF}1|0 zGw!Kl0Y6u-G`FdU2C`jSHh3)#eEH4_$ltTGfw8zQzeWn6h3IQMQBaxeYRAK!`+hGr zSWnO<=c=&mS!v~qA%&&!{S92=O8Y^dgY)<+?ySviv1|^QK^u3#h?A3n zVDM>^ee8Lw3#23NPF=!N@jdKpm@wEYERYb95j_m~-`0?>EMf@`Sn zdt51%5`X1&eaxA4N4L%1dR%m3+IMptpox4nFmWR=I_ff)OEpNX;Bw>Sdz88qaCWW5 zaGocSbxVdZ%FMQ%W=vPqb42|g0S*oUAu>%{)+3Dq< z!M6Kx@raRITbuNO8nx7g8@f36?`2PCl%}7KPv_9>9E_||Xkak0HEsF`a*o1$b~zp; z#E6c0xmKU;!RCWxZN6a!2$-c(Db5%1%P#Ffc+ox@>FBA4WUNq(^7I}l{}QK#X#qnYAMCXjODI8Y=3+x;R2w`!Ut&1h?%Xs{dD+UxfZM}0SqvnI5UdI z%$b4Q)}OAvu+mR_;3o_keiO(aMRa-<&oZyTj6!v}Fyz5Y|YCyzhtjeKot?F&lS96Os< zowbo#tFD8Mk=Yyztv@{X>t*3>rCr1>Wav9EUa=cY@HejgnNapBz3wtR?T-7p)i7-| ztqr(SuT0wLBM*|tQmm3jHe@$FXJKUN&bF^ynZN1cAvbNiv=+dyub2^F=ckj}aE6Nk zdhF$2uGchW6L5VHy+o;G`YH!PEakYGpxz+i$DjA`d-7tBW{iiK^TL4GO~ep-%d#lAdy#gzL$A2Ic= zOP4~?QoX2h24Aecv1lOz-_@ZD}5eH~p~2bU82mOX*R`B={p5;^UQr z%6xzUbv2vxpZ8`1zSnnCng>4S+H7>oE}NYTnf*FqU&HeJG*w^%6kY01_P9TtPCt4J zH#W~JO@Du}3Okri%lNgMAumkI*u=vvS~!}O0gHT@a((Vr{wGPIQx3ZN3X6zZvbD)$ zX09h&A(dm_pPCiZvGeopneP#B;&;i6f#*7?F8OMC9n*W33VDoJ0R1O@U@0QzG~V!B ze2?mw&AI;9Bmd_^#)%Xc#*1z^D z3p4i51!x7NpuWD07H;vjrh~0na#?2~oOBh(`$##*^FEB5Md8%ZmbeWo-(i(FQx$R0 z^0xFO5sDPD??=sohcm6}LmLGx88#E`$gCn&?Tv;U!fh#E8KfyLYNwGWSRzNJ^dk;5 z>o5}2;JHi>ioq@1V{XIgpQRXE4ey!6wvSS6yt1;@NI6mN_xFAmtQknH```Myc+GR* zK8D2Y-g$ZMo>%nndp*hFvGYjuczMj5QrB~Jb9Lq8<1sgQ!Z)c;++D?Dj8D2|KoCJ~=?d*^UVx zPghL)&##}>am|;v@mM=_ec5E1_kRAAmCyL*_ak2~&zfzV(6#B8m?BTc^_8B3)LVSm z*2r06Q2m3a50}0;Hr4)!sRU^wH+m`t4qHSZEFjCaHiyC*LVLT)G;q`2PFDAJ$Fk`C z5%04E(c1duOAx(Y9?+cM%%9f%rg!pke2E!skyz*X@(C&|G)qn&xARv)apB^H{^jQW z1Mg)=9CtJJQHUz0y_<%CVNHl}qZWgvcnUUo1^&@JZ=lEZPU}YUTxpCZG1chf=V?7}|S6y+f7jm1(bCtOW{=S2!dprsIVF)OS!zz<1sk@G*PK!sYv#`-+AO2k@ zz1?huhRfwH_bxugE~J-eW5M4)ey5?2v)CGuU2OI-z3T0Ktfn>+?~iNcZYFQmOdlWm z_%aTw6-mLf(}yoG+^*(o8G3M>>d>x6h>Z3=FOZ=rn$7jJ>rs0xUEmJ z@zGT2y(fZg8=E+<*~D}RGK#M&n7-yuT(~`=#D|EYJ39UP8tT;}yC_n{M+G)6bM_Oh z2AZIN22x}CdsRn*`f`1OY^~OpXC&>equEt<@+=^IHm)+Xdu6dJvHrUQFBiovx~?KKaRP_qOGm$PA7y8Y@eaAO#Ym=RZobukka31VNS1d~<}Y zY)<*s5Pr$4v0+DhPx}UMA1ST6%iVYS&7Wo}qw34VtENVT75s!mgy%Cl3talTz}cf+ z6J&jo>RSTQh1Imhd*mc{at5M;Y?V-tenbw~?1E39q#Q087msGxbS7zMaC)*a$IHit z_NqA2F|lHEwmrp{Z@OJZ@j1AchZ^!X;@y`wa&62pH zaOUG8-)srOQuq58Rt7rREN@HBwR(RKyG$TI@}S=fh#BH-8s%52of(@xO3*e2-M#6f5dUnKt_}UuUXQQV-y}+OW)Z(0IE0>IgCXhW=9VVx zal}_GJF(b}7==QWg7WgDzP2_DxHdf$xcCWX)8KpK40QPZBt?FlpPvtf!XQG7jrM=- zgiTQ7U}d2)+P$Go;qSq2#Dp4@OUhDQ>SIxS4%hgIx2-KRmcPRU=WYlg+Iv}dpFpoC z8jisz6j@f_(HwijRhe~ZBX`5ZsGgsm%LP25h0y#zH}$Vex-n3?CSLBTr6@$kud5J= zC2{s-O&~leE-iShr5M!=GDK`A<1;ig#7}u0tMRElKY*>|;7TH~4gFM*P}14JT{cZK zlb9sWpTnuC(T3@OW^z-R$MzC?DKOEW7sL8V7Cv%j=Owsy^pG55W{iYe{zXINT>-cJ z;i@2Wvlegs+S^LLNXOIb$e1(r=fY~k%g2H|^D5)m3B)ti!zCrnQJ2~#^S$_~8zMa{ z8&|$$gO~Te7?~xRwtc|ch5O$qgCsZndPiFeMJQMZ7Iv=e*CVKY6EE6nigo{y>G^7m z?e0&2A0gs6p^W_A{d4Z0nLb^u$=EOx`O9jNY?sMP2<2t_%(%bC$EVG+KXRH+EnC73 zg-O7lK`IW@5cuCnx0P=1Vm1T??Z^jb>?_;B0ZrNWEj>A8WYwK3_F#;PTs>;^`^1R=Z-^iT;^0!% zw->f3!VA)MQ$2WpmWI%rYW=vPYkggtkdp6m-$@H=4h>_DPA5m`@Vq6m7>zn4?Mi{d z@I75cI~Zr)cT=Ohu6sVeP+73l?R3NR=r47US=ZgR-K9ppQIOwiqC0W9rB`O`)o3P? 
z2!(0ZnDd7E>@x?!HdO6?At1tL*@MkQxYbikrwth&W6g`UWcHHCez7B+h{bSMSvkG; zw6de0qu`MuL4JJL7V3<^yUy6{<3PJ7*?$&Ux9QrKDA4?@8C1(|Fq$*{YK{O?(GK(b zQN>2rgR!sK3rBJ^%rk1t1%6K4Q9CepwFS-51AC;hq8<6m+B*(yBOQWkpIdJ!)wgdO zmTtBCt-LtZEdFe~$fl&$t33B*qSrME+O5AiLfn#5ZFS9e_?xK1k9(-n(R9J3b8KvlvAN34m z{caw^d&KfmRMKCSu|xUr)^>I%akCPyGk7$@fVK={8stZ>7;C!K&eG)^JKF8i-j{?ugB=B3>#ScWBIquK$yK?RL=!TfmsHrlt!Y0>8HN~=Ai+|5B+aeei z_!dlP8@YE7$e$w~3K1c%_^o_6Ww99nU$adx^Sy3bHOLf`N?eO@N-)dE}U%zaPJ?=9N3u{Zb&IHB>V$HS1!I3n|{bgY;0v)GB`Bm6bR2`Qx zCC};3@Adww*150M`STML^A6Ul`17K&I>!6_Tp}dk>YT=-O(#WN%Woh)-rpOsKiBSK zwR!WC93nTWZhR@t?sPt=Y7&{t^*m(ZU8@UCDD(s&=UVo97l-~*xoHjAs8lM%s28)q z87nFx1Sh+sd&^4ct1EkwM7&?3{lG1?DlT8LuK%4yEMjsW5&4m#yfmMI`VP3pMkGXN zB-cR5PtE5!RAE{rXU;JHk7kvU946PdG|hy#i-=g=5aY=_iVta|p~-_J5%g2%Mfnn+ zK>)qac7n&oB_cur9E#lubntYE-*<_J_V)J5uLb~O>mqk(!4DW67`WBQ%#A{XN$sru z{tWnOV@zqicoc8R)qqf8K`R}TARn#Qipz%~t$ke;%PXYwebti3reVgtw#$bdqaSql z(xSvaQ%9i`hN4g;ulo@MZmLG|N1fhy5!D8|v9={6i7_y2Z?DS{l=(mtZ(nRhyKG9% z4sYv)@pNx=f9>x3gYVOJJt<%w#eb2v$1!d|p>$T&r(Kt^DV%fk@`O{C#-BmJ73urV zL*jXccX=h}|=>XaO^tz5heiS%x*?hwq;5 z5=rS+Bt_{4>29P!O1gVvw6rwRAt4P?QllkgNH-&7(hUO!JNy05IoEa0>%G}^ZQm#E z`|~`{#lrjMCWvc5Q;+V!3dC#S)qCq+N&@f(dyC_&J0`AcO6x?#uev&R=>!xSFLK^@ zq%mHB$gsv4a#@`SVq8>SKx>+M_B4e-$BU7tzHF~FBsEMawRmXSyNcGhr;ZS&Y%7ktrjRV#jM;Xe$QzR~6y~R`x zU)~7j8z271VE6|QI9lQl{HAoTl`34>Qh#?Nf?3L)0?j$uA*+5$-0GS(nrY6XXVk+} z(PFfJZjw1L(P|TRCOB6}T3(rqaK{~A;d^RNnL#!Oyv*3$^+V@ZeRw9Rk?C}R_uI4pX>t(PC%DvqZc|!CUz6lxO+7z8vR#%p}Rh> z&F|=^go8H?sdLDyjqN2saP(53<&Rtt4imYgXYaQnL9y4#UZWLl$#z8M!Qtn&7MCFcR4=X6%R{N)d z2kIw;6vBX-ONaqrxC%m&@LDDX47|8H*AZ=$aGeyTAKzOeqV0bBvr1zRs&I6+JEK2> z@dccM(TDXkt3bJRkkne@Kj>59Fuc=URAIIC)N$}rpg;raQ6)p({6HGjYFar4@M7!L zxGzNzCp_-(71C##K)XJoSAfuTMx!u*`Xl4hX>1^%p**y&jA{0kGLlS58T}8lM@B|Q z)WN}lj*$@+0U@E??ma^#2axl>h(P@V3`PqNGB$?8!ON)e3I_b1_Dgy9`R0F78Z2wT z{sgY~+W3SW@)SR^CF_NppH0PGi;l2|AJ+Oz40x64%Z(h7O=1sOeZUT4_*OzPH{lfU(F8pxJ#RtK);2E+(u83K^~NtuMXpEkH?T+ zvz-@|7|5RldYOCgV-v5hb4zFKWVE_wMP>OeLo`i=tJ^0cNhk|!ap?ak9*ug7SX7kUAcMw4o^dH5XMN1uyi&7?B;SpV2*9FW3Bx-MfL&D|7hzVIKEk z5cznRGGJn4|LbKPi!{^vr--X`-X^fY4_Sg!^)E-TIFMYLF^$5}m>bc64|`DLY9`3* zy{#D6y8zPYL#VQ&8P2mL8az~Rhia;`VDXm{;a^2UHCpJj(j69}kvslm{V+Dd06Mk~I~8jJZaSk^|nU`C-5C(*e!BrOVgq1Hd3} zz{|)+w^v10|A>kTK5^0Y$WykP8d=a`gzfvz#+~tcoI8fJ`YT? 
zif^e?5-PUj6eOoKq`+~bev_qPMRMab9f{0UEpD=VxiAai*41jTZ4dKy9kN)b19S44 z{q-J}pvp42bv^r4t25F%>(nPLtvIOkQvFF+iIea?WNeeTuDW?fUn0C}t zWzUCwYcXJ+4>5VkZ#}6~nA4j3n*P_sv$krU$+-R(D?4F$h?^i{;Y)hh1M$kVY)}1k z+XaFp@-U9rtOIcEMEFr+j03WW89${epRyR(1tLf1HcIPyh^ z8-6tN)L3l@o3z6;=kw<8l$Y_p2;uLiNk1i=s{AjkQ}U)xIfS&dzgj!!VSI9U_<2?a zTZlm*5UJVOB9t}xbIxBsSg(-78{WN(7As(ijXw!cxH}SL1mlB%U#CFj%ePJ8{Cwo_ z0LwyjP@lN##?vj?=k}KHZra(lP1qHG*s<-qIxs(|?1 zKt3u*Kzs@qP*3@(O)VJ}vx|AAmzQZXwPhsy#SnnaXaRvT=3S$?sN9x4#Z-jcJX`0~ zU`zOv5CfICsc*;rWC-IoMh4tSQC{|flrodQx67i`24yo_Y#+wJo@&;GZGX`AbuvGu@BflUX<7T4@SA9rYx*^h>=$P;+c2)F7*xte z-kX^Hil*TP)lfXH-jnV4Z!Rc#nx?FSvXy1%5nS=L9Xue$DC-p(s{QPYAB*lm_ z->p*S8+|kNjm_pC?BWUrpq1rmh7Dy(7csX7OHF`DlC6;eG#1?V92 z;iev3Fd?%6WZGW=ChNdbXJpVa1TQ36EzK?E$*h7VDdVcrZAS>G@$g)s=J7%qVehu#SDX(-MI^!LM z=yJ-U@yKNQ_k+`DNd$hBleKWYgwPylwhl8T3QR>u%ucmh|EX7=AuWf4j0q$PAa1@U zP}$jbdfk7smnNlO`^OsRnDv=?S}Q`>dc&jXOVv)>Id@2vyA=jxR z+9J=+baH5g4XG91bVsnnN1>dK%5u2*xc+XrwuBw9;jsR`)1N}3Rr^D~AYH#@fYm>O zN|u%ERJjszBE2=E3x7t@nq?ezh#Bu8TXLX9vsC#J4hB^-n4_JK7w3Tok8r}f^Jy|} zEO=7v9>#McO(5a(r4L=z__ zp!P%1-9P=KI9G|3OpcUOd)km^$dVI3#To5M1piW|4hEZeL@~`}#w1}`##yLo+%o9Q zw3|Mw#T-`jMhMowMqe)`!(>}{mTD~<9U(}<+W@=En$_sDc!w7KB&qYKl#)JXa&o$e^}gCSNdZ4 z{Tx{6wKcDj;XyFPA_;f;5N8aeA%=n)-d@!=(rmA85GHM+m&oe2Qb6-&^3FojD%$xY zYd?nVR`Q?UlXfk7iv9E>>F6&VoTqPAJf0~Ax5&~UtkJ+-Tq}(eo+gC&o6xkoVe%`F z82#+7LDf-o#W1(ttkZ*kDTb3iEuu7m&^w0_6R`&7?q+v+P?1-6kr@2xOIB5tOr*<; zIA+Te8e#P)TFjveyBIv8wI#8*j!A)BG@JpB@s|3V1~;wt)iy4aJW}lh`cv;>^W*Xp zd`tiJz~O3E?8U`}&W8`;lr>-fCwhNHX)J@S6^CEI4gXcvGy7Fwq@T8~acz2g2tx?W zD;<)Hco307T8^6S6)b8ng}`9x4KjDPAq>C<2v^bJDe%ZEpzxOAag{peNd@|!3@Af@ z*u(%K=cow~%JQ$Uo}5d-MT`>ZqXRu7Zud(eVKSYGV|~a@m?q!r$A`y*GtGuvX=w|< zJ?ybQUHW9IDC@SeFvQVRfA_S34DL}+II$>0M+xt9wwqO4eG5X`KK`i!_i@r;KTV!1 zf`Hydn{KWfF0SjxCKEpw&-V|@ArE#b>O~@Vu%h6To+%aVodP%NLKDYqiWBJ7GvAem~g|dx2(EO2bZ= zvdGy9^_}`lX7NDg{(v2UM%T>mYF=p{(f5j6jzAh4gmOJF!r1Yn&#m%5wQz`4QmsHf zY8ZVaQWI%T9s(fttBLTZMbGV=8&j@XVLeLqwMG1j^t!}LW!<%d3Pz8@eY`%&g%?!N z&`|QxznuB};?}<4{SKY2Up+LA$pq%Tvj-WHYtxj;+jAW&QZH~)k-_$E*ls+ZxgT;x zwLf@8Ty2AOY1LwtIn)#?yccLo>~t`I&q=5LnmKPhG0gkopZ3{-(H%t zfN#_H%XeYP!ZH59NZcgZ^5pY@h4@E9$$`;q*VzG#>SRWyebV+(oWV4`JtIS{rM3!j z*Q$cioP5nQHMP$kA9C27KmI3glMZ7@zYb%{z+0T1ea?PW(@~U}N$Kt5v(xkIcErOD zRn>3CQ1%tPnF$N-tM?-o4f5$`|L-N(qK|r^QUrI;mpQg3aAa%vawJMPACP!B81(Qj zBxt|fi#duPhWi>MYBjdtuF{RoKba7A#K((fM$kh)lGhcP8B$S|vRdXRh>LVs5-$D0 z6U|m!)%!I;K4mYY;7o$-?h{0$g$V}Ey_sum=iw)$V17YM8JuXvkQ;J1uQCQ(R7`$pBDQc*vlbU?c@p)!JqGxmTiOLtyEk^ zJgwOljqJ_W+#SP|;dqAU@{J=wsZ)!fOt-36L>5pxi>k_+RK5!1B(gY~3W5t`X8As! zpHgY#BC>Bcu+W1sKe{xH$TfR@&#Au;CiVX!ur4OR7|Eo@%4bUEwzMokLAg||^h+7E z(LBoRW7Z4bsKk;|!2rGpb;mA84ySHYKIlNpfld_OaMA6?Txj<47teYt-L-k)q!b0t z$_+~Ec>AkJEcngS`Preu;-LFVR|dcg9Um~by`A&uOP}aLL-$$v1JnHDu)>3mhzRCD z{r!!}#`Dx84l91<2{}&Ci!x~bb|*Enqk4I-Vq=uu_Lx01-?nXO?cR6VC{kPv3_9i) zVn1Sb+Kzl)$6`j5>mr4T^Dr0RT(!Zc+m#~x0?k%}(n3rNDu=6hWBhN&FLhL6e=!(! 
z0A%(2W?HQW*!+{nl|`a_DC9HD;q~wF90mfKA_q=G#lNgkr|V1e#h9Z7xytQ=R2=as zP$r0F-UALaU zxnFmo3AKSvvSMHk)pMK`IY^uD{A{%zTs5XhcNUiUeov<{J=W3MGu1F489_wDwf*}h z(`{LsIo3^B;^g+1^aGDuCvjoe;8XczT;y{E%T0Y_LPzK2FK}7__?WV4S&6B^ZY?jV zzX|Zq+)NZrDYVg9V8QS8rqoo}fZ!fnLJm5F@5(&3$(&`pXgGXchk=wH*$q+tYDbmq z4Np7#q&C2b@s6UCcFTNJM0>9vXh4JdzzN+8t(tyti zH8==GrSVZdWygyciTbETVP1lUMt<`Sfyp8nAou*+>WR1Q;46+Zkuvb+xo(a-7J!;i za3ux1xNo!b?lY z3O8&I$^u;vg7+b_GE$4Y*WRh$i(r)#HxlaV*{7T7BCDzOkwujSt9&uD_GSfvPYmjb zzUz`k5+<=B7NgBf-;VjUphZ5S84hcq<@ksx%rb2bK>6s`oze5WC$XT(a#+&n#&w9= zXS-a?ZKCZW(MB{-nCO-Nv^|7!@2D$y88$px17*d>5s@%_xceAs;G>`8p^!fC0VLjol+V6{H z0}4g!XHl}|?2B)s_Vky}caT$GD`2fV_ft*({CeXQQ#O${la&(zdf!G`jUNt1oU6t5 z`K^42SJU{I+d#k>h_-scS{6K1gA!(JwKB7)4w)TuYh1g^f1FX>oR{Hr{YnNu-2~l) zl!Adk2@Yj;s=!n#oyV)`5{0el;SbyJbf!mX zB*$6J#*b&kPf5fW7i630vyc*2KQyikl2i?+*6$Y@F6sRq{n2$SD)v?P{0~bbKvmUzT;;$Rdop8uhki4mM*`V6ym(2*PEkTu9$zSIf5G6uAvuH~&!eA~l5(zUc1wu@@~>+c z>s?39k8cVGzrNKMdmBoTG;(5)Qv`ke8^OC=0mA<0_|zx(hQ>XGH1_T;>*tCMb!C^;ai2s*fnDId*O`R*<)0Sb5@r`6x{?3 zMdNM6jeonXC6A+laF6yA(f`F^=KSB;POvTtEMpkVR;_Fc!gT?moQvt{X)^!9PXtVi zxRMifc)1*erA%D?Q;e=w?8Xl}yv0S+*T$1o_&6LJx00r>spa1Y``BX+Z46RNC{VMT z4RdEbhxSoT){|j0vO|H(0lbKdOQpuh#6}esBD}#o@Y*i$N^bg*Nbg)#1^O*D9#$sZ z>#nOiG%)!T*lRD)P1?VKDk}R=3`PQyvg>CHP`z+!^Kba8yRK)F+O@xnBbJ}{<8nOj z$NpX)_^dN74J)iI5uM;f_@GE|Gq?Bk!T>7d;SF8ckr&ji_m2KDeBkKMe_vytHg{bV^ zB6L+N%KMZipvcX47I-NYk2mFF##~)aY}AoOh8Pc(s+qyH;MrH%|GNvI-D7>SDmdmz zq90xrG2KW1E?`wWJB&#fNs%}!i^(tpAt$?-97mx;=2R_w`DK<4eE;Yo8a2tH4l5T$ zjH0>l8vmhjQKcX#qb_hf=M1eI2;<+dg8J|@f(8AqKN!9os;SzHj+^irndTyc5Bs~| zaS969zO&Q3Km=tR|JDdjy5W1Pj&<`&DacZKA30jP*&!LuiBS8@MDC zHG?dPQpHS=_83TEd<1lct|>h%5JNC;2PLPR4)>lix+l{<`_%N{Ju*$O7wPwth;bc_ z5%9V52Q7L*QmCV6jNMRp?Bklm3az6>J6?VCiQ$ z^W6M=^xuhW?cd^DtzWaKRPYe98lX+w?^U*w;9dx(;%2At^;S`>LrSgxo8Yb9;Q+<-xegQM)_$6bn4>`;f$EXiL#*%yOFsm^qz zbHNQh>KMRRLw<&NzP}z1ObDm&W|R~z7V)ofUion3L7;0QUU-j({SNu!EoJ)3*vt%~ z#^{Iq+wAameFhs`_Q#cr+D6rA7J7<`BX^wXzNHnWjh%j;ze?cfn6YMF4P-orz3oRIeo9J*avsQVdxnLUNH`EJ z#g*+6&&6|lp;2w|tfpY9r9JA>b5n~e4>;rpLVPB7M1_3`EQ%SZJu3>>9({2MkN^`4KL_2(@oPE{TTp?LIT2i5Bh&#nuV7BNrv+D^?8}B}w{5md>fQE&S zz-2yD#s=g&R19uJJzAM0VaSlQvt<0(5MTX_0aQ1MNk70%AnetFW`mLY)e8c2cJ{Wh zyku~Zg{|K-q%I_x3pnN_KC z&=opx0D^iFTr|gK(!+qH9Lk|bx+A+XJKfClLq4`3;6kk_mhYr(&rooBA6H6dgM`Q( zA4*G{!SIsA^t`#gj_wDQcs_5o+dHb&>9O{z;;Fe;w-kRR@gKzz>xwR9%qSI1aa1Zp z|Ec?#yJL1tVnqDEO;kuA?es~T*qp_v|6!r<9z!P3@;@E5p-Rcw(r76`{^69=oODr_ z<&=H!Z`-%`TYn9|eSyj_+22_)xf1ooNLqdu4?3xB^m{KhzQ&;&)=lYm4P07O5nfCd zphkEaIvWLQ))IN2LI2fxVna;yp~kCf&%iGZdG< zVt#zlQS``c8b-Q#EWV1)n9Ew)hf@K6Lu1AAcj?KB=~IO0CZ#aQ@I_!FmMeckc%rsW z_GVbgYx+-2BqE>?fJ(*mF*Js7Onh1*R47!o@eieEpDTFb1qOSY{0~S!A8+bE8L>sm zVUzebj#0+_K^$(2AYj`YDlN-Hd?u?SdZb!vXa3s~_ zzhZ(uX>a_^(}-uMisg#e{z4Kjsq{&1?nP1{#M*jFTm!Gj$AKXjA|80Ra4j$I%_X}UrvZ0V1unmD6l6a|JI9e)f~@=%W=N$a&Vh+c%aUdI8d4bjd0pz=@YUk8Up5YLx_hQy z!N95@fg3~ivq*K1RIoOk7BhxV1R4RR7yCbLBj_f|N`raon-DA@72Q-D3#YzC5`QFQ z7X1jM>wmEX4f$9G|8rs63;De49v}xb?m@B&tKzPfV~1@MGv9HcY}jVBTP}RW4ram8 zaY=okWG!D0Wt;0Vxv<;H+ssd2=j^|lF|(X$KgR%KnV2&BrIuDlzHits{TauasP@2P zxh|>nrj~u}8&CN7g`>}8QlR-FIH?Ma3ta~{BvcpLAz#Xnu&nNNK^OM#mamuN_S)0k zg}?fch4AWffK2r+6eeg{7Uh!|HR(8+RiFW0PNj1@LY8??v3Du?a!wUz}d%&Rm*%wqJkYjK5i};7P8jn@lEDoj(ZvO!T%V zC87B}&qAF(JEpdINI13`zpy9z%$yKanLBm1{YbExQM?{)&D}55StmI12VbTnn^;?! 
zhvW~9NlaM8I^~%8%8=q3@Gj+_yW88l zmFlm6+b2yt1n^E;U4}Z0(Z6NXk!@%E{DA(qHILXScaHiu#T}S~>U+X1Zb&{=A!f2v zf8rSAMNu^d?H#=tc8(6%MEJNo#q<;o^w4w@sErBq*Vf=xLHh=Q^-u|iooWZFm^M-p z#sZx7RKvywtBk#W<2s^u-MM2u$uFEAi@N)7DZ-`pzdV?-b&M>>q(gC2_a{8rRi6NK zxI_HRXW1<$rpaZfkvW8@$CMnaOh}zXx5Xxde_3KHmZK%ZxEg;$H8Zb|v(vu(7hJB< zF426FZ3?w-b6QDaNI$Wi7qZu^4E!kIMyQx_REboJRsZOTQio5e)^9s`e(`63^HB2G+iY zkU&Z^kXX*S4RXDG1NF`6)kPdjhi4th2a7@Zs;7T&7-A`~>r-iajg8}NjdWjDoUCWu zE$=T(2O47lt$gV>RH48x7n&08_a_g};4NV=o1P!lZEtI>;w|t!JEj25et4~ATa9Rc ztD|fAH3jSR=7RDI#4T^$`CfKmJt=~a{lD9)Jvo|nH)se`*69gzMM8AO*~*4?)CXG* zKgk0CfL~Kng#?)z2dO6iLuh&oW1vg|Gr^rA!#EnSX57QN#d=9u9ot*W9!7BqaecU; ztQvuWS{-sI_KHb$4Kg`Sgj#o6Qu_T31r>EnclVmg96>^rAhI+Fa zDD9Ms3QqC?`s)kZRtrR$(DA23j1T6WjS|(8slUw?KKs&gK%+vXYNmxv=R&+BjVBvE z00e*&zcE}P&&#lyYwL@OlWk3B9 zvkvwno3X;RtBr0fz*KS^`K=Y2azRVeMmnIcqA zYe;?td!2=-#W6h79`x$NllS<|+D-4)*=kwcD-4#{7)L?s4o4eH>`*jeO(bM^p z%F@+&7_!!12*)~?U8v%)`DH2u=v-*h2^k2TS?q^m(8F+wcvaKTch zKIWVfESbxc{mb7=auHoa4@y|Z2lxb{b`|=`4gC-of6US7Qs&V{eL6)1Nux17VjG;+ zGc2=N%1|VK)ayKrB_}@a2^a$I_*F2JCge!*NQ=GilSP zH^}k$BOQy+$4fHe^HeeoqyE`jBfspc%Ts`gS>e>qkYE1-hYW$iD1+0&PnY6T(;DN- z0sIw*0*ID)ckHjjIz+~WK_?%R<6Y)~1h42l+3q#>6jSSA~ zoO!5y1m*&HptS?qu-S&Re*gxu#K%n+`j;6CfJxe`2t|!*SSdq2V!@mAMmjPr15;&1 z!&_zd(k6EtBY)r3_fdt0@%r(i75PijGm~vuD>%}3C8;_rIbXcJClN`9r67|?Z=>_r z!fdu`lXavjUw6Z|3AdIV3!g|tc&hKq96%`H3zap};OrO+BBV{Em#IvgZqq94gKD)t zvs?VngcIKC@h?zDXY!EodnS%rZdaB|Q8(~8C9zN12=dyrRph@_rZ*!QGkGH+?H4yJ zVtdC$XAzpBR9bhiW2t~qpAgGIY05L@Z8F(p?cfn>5}JvfX{cfW{-G_XMKBTski<-Xxz3Wv2T%gGHLUHm5oc!5QzV^GS8%H=0=^z$dESF zEll;7gO_jWp=wsv+s>-DLXs9{WQTTUr3Y^oJ)&!aDCy|?#K9<7($`dM>n^jr&9=#< zF`VK_*$*WM0_7oZ=x5>|DswTD^BhJ}L7Y?QA}~1|-OWR)z36#DZ|1nk&pW1ip@jUH zMIvX-7}Zbb96^dKunTY5p$075@tYjBmx@313w@YzS_Mcrew<1#(y6{Vr2dN&sllC7 zmTV8{(h72{Csp$9W`6-oKnvvT%4ap`U#R3eUE@QU>_h%7OS_&h({2^o%ADDCtc5F|zG^!y_gWkpDE6O+xeQKx z<%!Tol<V9?O~!*b2!x9Y7HuC+ zOvuvzpfMM;OMfH1C3GxsOUwp;T>BK_#`kQr&;G@)8+IReA5H1w4$A%?gp!fQ=iE;3ynrN&eI}h_yhLEZ^!gwXQA-NETNZbo=2PNzTQ4uZ%>Le+{r_V8{K_8$- zfG8iTlwh)}-pxT-q*E9Vw5c<-<6u0h z#=KMZGb9-{plEPWg{cm63lr~PpV^v0v!NCQPI>-1@7+yT>Lw5*RE-h4thbrqwN};Z zoA$O#S@&QWTMRi(iKexHIiV8?407W*F#h%>u7TWu{j3ZuGx$C#^SpC>uc0?DdrR?- z98@H}KO~qVz%LQ$rzU5v?6>84Mb99U%{;*b9xJ;9-%mpS_M*N&W4;Dc*_&3{$_Y|k z_)IP=0m+~IBL>+2N-SNx%E@Y2B5l9VYU3@xWb6mP>!~hI3|u}a+5WD$cEiiRkDM~* zhIchhT;0kspq(UgI8y>B1yZ*34y?y9clZ1ssXO$@*2FQ^K2032Un9 zTv!+!t<<41>n|?ErhFFMUqKWz9u(UZPejYYDqZQ3;J@p-?GPuwZZ=HVT#lsxYLQqb2|qet=Kby9QCKHW}&@ zOLYpGMAr%no;&;=Q)_IAUE&)1cIts{tZ9Whb3xU|V1Gh@H)U$=*ag1dkX)h=XJJk` z3w?gNnjHUNi+7s9T!F;F=-c%Ytb7xCf9UvKYBdK>%}|jtb2x0Q{Uv&hK~zhTx7o8N_;C{j2uMtT zaOuBhw}0(l!U|_mnY)Gp>c~ncEz(7psY}1Q7QbdjKqBdjwC}!euND$@$Y(VaJm8Tn zkxpfCo_Q$jo3H7Z6?V+g`qR55)57cR!` z9IfpB9IuH?50QRVBu7pTml5A&00h;FW!U8PC9$$czkJC1LftRgsB&E6A+Be9H`&Hr z>fKu{lCKnPB^|$1OKVbugK3CpO#%cxe*naM=#|545e*>`t__z(ofL#5U_Fsc>4K0V zFVC2i$1FAU{w#3eX)XQg{Uj&HKDKJi^$Y9lPxaI^IFKfay7`l(I~>2Cv8Ks|Hjv+f z3{!|=^7&$xkG()6z;F2U2qN{ef03EBlfde6ZKhkt^ef#)DgVa%+&=7WAa0lxX7}4{ zz~k6R=TdKe;{4_q1sb5 zMURJ1)^NfFg9pb-&i^T$N?>l&U9-|hYaS$YIo4Bn zMCOQ()j8Ht?v*$iI$~$!8Vh=%`QyEpbSFMeb2m?2*|C)b{*!;n)JG10-&;i+TitDB)OqVQ?K}RTnt?BVJw*;L!=S;t zQEl`4c{Rn+-p}dx&Ew!SUk+@h-WjH;5~|ua4KAMvsI17Z%>`;GY6<)cwH#DH??ms^ zb^A{ke$qm|NWS#4l;w#nMw=hGW8yfO>$IH3{uj^9dc*waUuwX&ym?<>TVyQB#U0$G zaw$5NwV?aWZ9dfyv%UVb=pVrY-_^(=MHA}Odk}iUL$`{*5#`eG{=2vdiK#2u-qRGU z-`H402nfV**w<9zFl753hdZDB*6m`R+TcYe-&fP_2~22dHV2^s=LW>jzvz_B-bh-v zqFmCp$tmoFoo}U0+5SIBrj|>;>}@KyOCq(IC4koWBrUk$b+2a!*sy6stm;kA>fTq$ zJqs=NPbxRa<1Lao&W9jX&TG2Ox~R3@a53R@LEfG(w1enoam8l?zzPfu_F2n-Fk$wvu_FSb%OBH 
zf(`fl0kj|wXgL3t4-1w0&~3L6SpvshP3TF!AAp1m&$=yKDI(0(L;BBo8nOnR@)P%) zoiHUj;9`3e7`!vX}Y$8R1BIVI&H97HALch37X(XYD^O z1#Pd1go9!0>FQ{9PASkBbPO2FUy0F+syv8_R)QM5&`~)2&ECX^6;>xh?YF)C0ys_1 z?Y_cf6FU%vW=-n(U?{bT2|6$4vAq6=o-;vFv$SM z+bp#R^x71($&9sHs{gnE?<0;7I_dwaSD`<^Cbq7yL1Y<_%l=^zFn zn`oc?=QRpEe5NLZF>k1 z-`VT>tLKJb*X09%l#DIzvzDvv9(K0wC`=c&i9Fd#s<%YWaWi2z2$}!w?nNag5^;0)W9A1$X*4Jd+7vS(T&MDNEOi-;8fpk3y=Mj{ zjZSjxw`P0m!d_)p{vL%uZkLbuqJr^VZu_#Jkg!v+0Ezj-3P?}Ony=}tIArHkQlypd z=@SE2_80fV0@y~- z#MJG{j*Q9im_6j`Be?I<@e!{(Ptqjt5J^k!|Dg?Rs2u|;kT7_*RTh}EQ$iDp?7;#Y zw0&-KMN2Sfm7!(@VA6bpJMk}-bQ3-PUU)z714QPi<1Z5FEM1!FK(K_-o12agVhT0z zLe;6qT}5-5I{r2$;weY?OYO9)HH;ov(PC&ZVi~y#Svrx@_3}7!SuQ#w=EtwL9+KVA@flfNOim!8qt}kB+l1o9u`vY5~L<33G{OIGSLJpF@ymyK>7Md?f+JDTO9gRUZVhoqEt=m#pN5%c&}9~ zCS}2|nM;@U%{DaCGmy1p0>r2;lq=Ad&b}VE!QXrZ7(3M(+ml!)b>`XJ^hh}8N4TSZ z)L8@&Jy#}!6aNgvq3@0tV+CyzktCXhmqY`O6@ki-H82 zGEO^kr8bl8|`HuR_r+~}5cZ?U=%+J5i z)V=8(vj3cLvH$^^lT|~x`b{GKHjuW(l4|>n-v6w6L?6Fj&7y3a{>tzfU;GIo&yH9O zb3;u#Fewh_%{ltRWu89SxK2FG8B!t#vz6u7*TlAfVK#h(-zBL^eQcrOp8`;t!29>U z$LHs{EExno+g|FwFmyf>v#rs;cH&^F<7GJmwme<^&8()1lqV6~6vwmOx3}qXkY|y8 zJ`=qoQm57PoWljvWn)Q4Uag^21;agh_i0nQSL>jmq|wirb4N@@vMVB5_~Rx~gbjOt zUr25G@a4;0T7#xsJC4+k`~m?_UIB!!`bXs74Q>5?8QN!HO#k_KnQYx~d{Z49&tFdN zZn%&|0}KxtSk0)se-wVBlOa0|frX4sY~XH0LlHMk^=E4YRHqY!J7V^0BL1@X0Vh=v zGUMtu;t|*jZ%;sdUdL}g%G)-!k+X|x`xBjN->qf$ zjy{4PQZJ3(G=jv^a%kx)z zK&tO}f>q(cL~kmkcRO1XiX&m2j_@9Whm`@xIoiFii*J{dQWYBekA#gSlyALyh zN5=SIG?Ak8si4S3HD~n|C2`WQy%*sh1;UTdf*z271@bIo`{=%qQpqXJ#pf$aVnuD2 z(t8h%bAO7_gpPCEtnqzBDP8-U^u1a>9Q}vp7wpie{=YQFdWbZk3=&opQ!{vH6MEvq zA`8`S+qXwAS`A=3JG>HncMVWEmGJyXeCF=W1(K-)BRMdT7a9MJHVle3+*s@rE9;Pd z2;ZLBZeTf6SA;k`3cOBhb|3xGAHzdkR&JP(&Pfhe&0dROD@K1`%l2}E`eMBX>-ikC z`^{OCV`{jC$~Bj6j*Q|oxOsf->;*bzi>a#$6#1h>qT8oU$aUZ|3#c#Sst1QVR6cwl zR@8bb{Po)iv08=seP??4Rrc*NK7aht0SncoDC~{+kAR<zu|05ifH~w=$ zwgS9Rqp2^Zio~ds8O0el?vEw5;D7W}&tb85gnQB7gA64@$a1U&-@K9MgiW1z_B6f9 zZ}7ldN1U|%tsWgjUi3Y}8UeFEb-+z6x{iY4_w+rEOs66MPFBIJ6gmdzCLnz@Kg^m$-$1(5+D(?GrYXvqmPqVDza$eL%rpq1*I2(vb zrfYxG%zHt3Jh~*c-&{VbexvSqFxG?$>#ohdS5WnUiINHl=%ERAp zu8P>9!hO3C?byxUV5VGB}zvHehB>Q5%?&Az`y%!TO{yb zAb_Bo^AtL(fe5XDhid1F8>-m86PsP;7)1n*UAwsK`uMmj}{1W(8 z;6FK`z~6ws^%}teD)2uKLH`Q{ehK`CP~acAJAvOU2%jU6B9^%h(s*@wE@b*yVK+qi zBPf~clh(H_AFBu)Qquh(B=jo?pWMJk^YA0D0^{6*0)I9He%0+I@c+x97P2SI3_n^~ z66>^lK@pheD49n4VwTZk;I59F3WV151Lt91ZEzjzG?NbTyU7H8zmvef6Z->GhB@@p zoCyN|PadC8;OCgLV3xl`f&U7^BU|R@#Q=;AHN|8?$cR}M>iK^6`A#y%4@QwW+h%~t zs=2x0b;fTUb}fe(@Avk#KjqHYXZqqDXOj3YQ~rgvp|Bodx@{2Rm(436NTC4#b30;} z9bqp44(G@mUxm-R7QvX;87t2tNHa))##O{mes-WG9A^Bgz`qsJyTKlcUcQ5R|244Z z)zoJ>81G_s@oD*V*TK<`Ug(Tu`5Ua=6_!y!UxoY+u}ueR}}+W?VP~oO{bLNdJczuh-y7G!isW zL4bq20<+fo+?2R)Fo79s>SYWDKAimQ#9}KfQj)(vFcRvY#`=h^_>Qzqa zH;pWi5csDff&cPAsDz-W`nIaN64fJQIDs4baCJd606A8I?pa9oI*ThIA})u(UoKIR zMMsLj-xC*;8z2@hGi?!NU4~%$){)M*z*&WA_+nJCCEvOz7N{I1>dmes~-3}$duM|5Hi0cL&?8eP%W>7FzDYM zlzSFJLKP(77a&9$I8aBMy!HSY1tdgD zaz2V8^0BK!5SjDC-)}AFMI<_mZE1c~o|oX^SpM~a z_OP#^A0qx7&dWv_GKZH-<`{~Ns`kH!igzQb*^5z>RE1vxzl!v~Lm68Ie@VEKiw&Mp zxxS`6GpgY4hy3pOzND#k@BBMNW(orWB;IddSr{{1xy>+xR{GNFxA$SwP zI^H{qc#$`Ih3A^BBdtI#8R$s}$*PJ=_U&Q*?1AKej{Asn*h~TI$+#iG-zaK6imBmz zDQjpOjzr|;*lB>pzL_`-An;{U1$2r0W&*g-G907N%XxuQ1-yoKO4!NZP164!g)JfQ zf3+8#TV}3kWvobYm#F0U@tNg|VLA8j1(l1&^L_0E(G-VQd43q2QpA{6dBAaKz>Ea`<&e)MvH$uIQSjFW321}7&ogla z&pB22jW~B+H!%;*+G6$^gG#oqDvThI^T2EsmSvqK>@R0-ZmiDb`I-^Ox9Jcs(>BTe zW_@waJxAbx$Ib#s@%ibCu=C#lIs zTh6-&B>x;#i*8Y=t2jdpY@E$Cbc(%(dQf#F@DCBt--oh)U&zBT^#cg{wJ1rscIoOT zs2`&JNrDZJK;mDHlkE-C24_H=m%#rXj-B6fz127~zK@e_G0r$sPoJl}(>NopvF5vu 
z6JZ6{TV9qM7V-UDQe>aRA!dr`b2t#Ycu)C0PRXrHOX53EFRE5YuJlCbqE5v4-hxwS z5zb2;JJG=hLH|nIK$yXc(qiiR90dL%?AJ*^vWid#NgpiJYe50Q3OIi1a1QDCk!=5a zQZuW-UrXD6z&lk`;79dI1^&-a;8(f-7Vl}>P;S}+iN6O&V;2sZvpAwuSt=w;fooKq zNwZNiJBNamYFXD&)H3rSuaT)W+_V|-NkbnaC;F{h5cu0Jg_*r1;tw~*Q6;@`BV=Y~hOBHbIsyPZ#_w){8Curh;7}x#JO=oco5#A6D0mnxfGrf7N*#$f{ongSg z#k>8^Yi4DEQRkA0lk>cW-&wyAu3<)kSXb7DD$|;wYTP!lYhRDg&P7R=GGcj@nQVU< z3uxbj!2g%AZBlAtfvIkRciwp?{QT$t6uPpPhMNnXfkB)Q_@{cGCG|_8yRIoBfgj$> zMo7|;z>fmI1pZx!S-gGldg#JuiVN{81pa^LJIUiE_~y#+S#CwaT>^g_Sd|3+UqDvh zg985-1pbYch*A_szoF#pw~n<&fr)Z3DLF!AT$klc%nYgWb|2*BrI3?<2yt3r4hj5< zV@Qyez~4VX;Md0}cI67kXR+d#{WvxUz`_*J_>WJ>+nIw8aaK4AzIKF+oPA(rLvSu6 z%3G%)$nhO0^Z)K>Yxo?Tb2S9!HIT=@An=ooKmvZ6z;7^snBnsf_*L1L8zF{wxS=z) z-}Tr6aJseNfBC54%b(Z}3EPYYMfg{@Lcm@Hu~`EDiMtT^w@cs$A5=~MeMr#nK;ZBE zg1}!MUI4RH*Gu#?=5Hp-LqeI5}LITYp8!wjgZ z{;TSL6u}S)`shX?KvEu_rY(w{L;^p3E`i@HtF!5=6oEf0{N#zbu^&3XW-a@@GuRQ) zk{6c|JOH^~Y&HYpeJ8lC*%u6wP!Poo@)EtfCW0y!V{@_I?%@*X*qGXx}n8F&k;$MlzRP1}jw<<}5!iGL9?p?a!l)3boW$0bI@E`@!?kWIG40#n)osrPL7X-x;hthS@!KmCWl z34izFgup+gJ8ha(E+OzwLjwO{2>kOP?y878AF^X5r2F*7vo!?}@_~x@A44v#hrF(% zxQgReNZ@Ayq$>SYNPnv9N=94<*nki35pQsz}i(-J0MTU#8n3sfWu{HebABmRvqWREg|pFt^kcz_W1-RFB) z#CMbSEeD0{f>6PnD`$7yrZ>qPZ$pm%lrdP#n3RZa3MwXHB>x|WB%H&`xG$8d<=2q$ zVJ&k_Lahqi*O@2P5b#q3{@IM-u{fSvAPGy7wJb@@2HrdldDa)B11NwSVBpw~1b*%T zhX`PKA40HYzr330Xo%3ceD5qn_b2@BU`O~Z`)b;0c6zZ~-#2nJreM(!ZFtsyOb(IzKM9dR_$8jgmD1%`f zL#iSy=ALhGNFSs$3I5KRm5dWf>;(j*G?0Pf49@`1QB_{Dzd>yOFJ$yzeBMFu78xdZ ze$0hTFY({VbA!cvwy45T2sk3)?N^NFQz+ycq_T?nu$(mQ{TsV7QH}FfiKYpt(kdXh&mW> z^U3L+@VJ3LH7TN-cGv-_-{1o6V7X=hD1lT4&JpMHFgEnKJnUgvW)d*qz^n!a$QX2I z@Q;GI&VTP$Uf1k7ZulG{f&Zsu`Y`z5#}fFd_hbUUU`sb7GM~#Iv4(Jy1ZQxNSqzFO z-@t|`IbUridqUuk-S)Nrc+b`Gth0J_bQFj8a40D$3DworAx#>a?CAd5!jdl&3nUi! zRxGe<&#v%K|NM_A@aKhF^PdBKFNo`^Z>8|0kne;A;+lwme22})*|A}Qz;BA=tEe^Y z!`X2sf&Uc<{ADO3W#h4V_YMO8)1*#*hLppm**%Q2=t~6tvh45*YCYR(OF}DZMwcM) zn>P74O7Yv57KO)AAkvxSn&&9z>QotJ4Kf#J!1p2IOW<$C*>UoEZ}^z^rW_nZ?7as?4NFKU~f)|)WFY7i{nVzMMzB3u%*-V|fan87@f>nug zB0opdrDODN;tz)_+4()P8LVJ^-opNU;K)tYc_(99T+P{y?@in4j1xe;1$ zTK)D^NB9*Euv*^V(ggk$i;BZRoQ6A5_BTDV4!LJhp?Vr;oOO2Xc(s*xEb66_SRs9t zpOYDGvsgQLp(7k8;Gh9#SQ`Ngu5nqmCEGNzK#IVR6SVbWm@(8A{`R>#6!@2g^78V8 zz(0+~LDJO30@J|)34#CN_wttr{3`JO=IwCd{H5^Ri_e57w>=S7udhRie}0OT4#B;r zr#GBFaXP&F`wzmgqbEXpYkOF-tR}qi*RMn1UmNBxTo5Gsw=^|J0=r~<3G9;KM@LW} zh3uY_mlKMLiXezd-X9M`d^Q9@zofJ{l$Mo3l+TH~<3mz?Q)6>zYiW(gTh_e!^FnEP zX?!Od9Zg-Uhrj|!`UVE%<>yCYR99zL3@{Lro&|uGos%63iV8z{MOpk^>r<_WJr5HA zFwj3hR)L<-+0hxsDEBt?dyKW2<(8I~5Xevw0|zoPGbRZ8w?b!oXJ~J0hp;~$ZlI*< zcXOzN?GaCzP0)aKs;H_6#k9|QXmi*1$@cAAKujwk@PGSuO-f8G@LjOL+i$-e{_!7w z7J71*hue#ujRgMNIz5stvA`6zfXjh2fnUymD(5KzKRg)7x)S)`f~4P#?@-ckT7ka? z0`H>oyzrYtcOmejurI+|g7Ys9w?qPe34S{>ZvF@Y|2D|VBltxxcMgZ&OLT^aUss9H zAVL{6kb7|{h7(tN!g+|!*B~4#e(_^OMA8KQ6Oen~L#g~Ss_KJabqb(~aoxG~agBaB zM-uq|M+p3?-sfh{2^X4s!+A*N`f@W^7u7=aUPMNLUNUw{qJER$fCJ!-s~||%LCjAP z_-993NO=D$fxirYZhj<(58|(_zt)4l7mgBmU^%4Zg@}L5CIEAYb=p3Z(0@xt1H}us zK;V}&zXXE!2~^B?9Bq!6;x+gfo55ZGK;X{~?;mMSDezNnAO6>uS5FZ5^B|*x)&BCp zbuz1Wgc^L)ik}!nVgHzgv>P0=13OB*7iTO$yyF%j4wvblcWG-27TQ-MK$yea8Lsco&wJ`2qKdBdxpYwaKk*>SqeUyOJG0_B>r8Hws)Lr3y;w+ zlK7vKzz;s!0X8Xt|1J9Nv--Oc_|ppfkn_QM_Yy=gIDQ*Z7BZZI%Zf>h1$PgO!~la) z?CIZb61~M%%~C42%<=hTZE5sDcY>*Qg9Uf7i+@&;$-(tCLn8(fd_-Nd7z>4PU&L{r zBjf29!5xazwbRBf#&RC^$pgbRlR^PMd#S@E#@|n)0>1?Q0tozMYnUMLzlU8Vpc~|2 zmWHPgv6+YMN8$OLfe`Qb?ZKvIXmt#1qQ4~1e*}Tw%&tw;bv0w^HL!KZzZo%lv|ra? 
zw7t!a*f7|#ShcvX!4{t}2JD}hjTSMR=V;Fu?=nLKIJAJV_Ym-4KZ!?I63CDZK5bxw zb$$0_TMTrl!nXHm_ z31m9KK-bL@g$r1qm+_ek3zhAUUmJ)-KS}eFh5g=zRTtz*6~843Z-bz`i0@pONj!W9 za-j>`*C6mOf#fGySkiDWq&`WQ5(h2Ag_YNmfGqJ>;^KZt!22L)N+eu|>bQ>dsK7#f z1xu?STuQEt1VBEM$ha3GvGtd5Sxxk%O6MvG=TTP)#*!(`7;v={B|hqGUH|hll`(i= z2juB%kn**GZNAA{heYOAAn1E-$&GEOeoI6)Lx3dE)zs^m<%LA$x_HWnQNmM_jQB0w z+*G~phmgMW^2|`*UmZ4Y{|}*d!*<3HL{Ro`vOUrSekT;0WZz!q#-{|gym!2lmph_w z8B5MHRpb$k{9;ofb60(DBqKXtiXjO5)1Vc}@CJ-Xj&+W8KwS3orx|y}l;a$6OfKa6 zxr{;2Rb6}M+1^Ncmr(Dfvb{w8C#i>X&L9?FmL&M63H)Z4aLziOCF4uRRhitmV6^u> zh{$_U?tf-E>BX4qem=rm(|MHen<0yMj&tfW2)oV?=f*||#4h&Rd5QgRJDMVK`t@}s zF_>Wib(1Wdo-+auRE_$j#*?J!s~w|6RQJbh94bmm?l=9P^WGpEvt+0;o(~aOMa*da zNw)pM$luXl5(57pI?f;x@6W9~BTjNZH$Y{O7ieEBomWFhN*M5=s=LoL$?`r=7BQaJ zK$e$~?sz-bJRAw@tjl8%%rY`tEFmc7le66f-1L0`B9ub-caQTC@|z0sW|%OrMFo7b zV;tonQm?&IO%H z-F-u02kD64g2Z2p!=sk>f1MWVAn~g_mH~Onb(m?jXVNaoR60;P*^i?Db{Yp@C(Z=} z7$3*cp(>G%x-wLnR7)Dw0W{%Y@;yTbo=Q_H7VSqRsR07N>A%ZSe%gwCOi5mV&0;nt z)SV3I_8rRiDV;GX>|`vawSEgn%VL}_&n?U&D4{ScTUZ#55m>SBTt`g9+=4^Ke%U}< zDzH^4&Yxid0IW|2#4a;38Ca^b%b*Gsv)XA_FY7~=gAp%z=mRmSzyq_uodkXr_+NXn zHU?8lcucm$0*M6@3p@x5q!h*fKzx&Q?E9kox3wh%{%^~Ye|kB0Bk(tzzZ|xI@42w; znJr<>hP7eAqIuw4Gb2g8r@J?tI(`bJ{&&OC!^a@jw}-_`tHbLM_}4=Aubf*Mu3Wqv zKKW>8B*aU`SNY!SsMg=#*B_SFE(z-)`;SAC@9pUeS1w*v6$G2{d>ik_@-QklDJ`E?2pJrVLF^ua~&aH?__*W^n ztD{@=F@Bd{0M1>#Fl=~yeVD&!e#pzsjaZT51+7hO;o^l$;bOz3aH-*P3^WkePLKH| z%NEB>0}B?;4^{K3V)@EroH=zioIG|qG!wAU(!_eZteD`0LP-4`L0gqol!hv7vVP;* zuzc0>XbVaFuJ4oW+qZ!Il@R#9eY++lCKmWESU>{*&weI>zcz&G=Mn<{ccIggI!0Hz zYXZgn%%G`C_S0l;ECDmh1oPW}z9W19$-Wm7tqS}bQ5lzjzo8Qr;d`n^L^}@2j-~4y6WrI5WLTDygjI4lt`B_h8klT7 z$DM5Ij~MSd2-^P!;?Yda*981QQ%)oE;!gz)i8aO=L5D zVzIb$M$C#jjJ=0A&#}R9f&Vf)mNzmw{{OT0-tToK=bh(EbgwxW&u;JTm*3dCIdcP)fx8-G zKdVI^=cK^})z`I3>uVa*XZHu{2Zm+dJEg@O3WEhL?BcLP_n0wujLSshfzu?OBy=~WR9VT8o^KdwNED!^De0L=Yc2P~~V2JnAdV-$Y40;!^a1N`$0 z;6DPuFKgR%_XMO1>}6Ycz&bimfP|F*iGuL};IXg`d>0^Ef$IQyfb*R15*$befX>5m zSv~&Trc!n4WeHBbFaUm_c)$j+rfpvf4)c@*SYo)0`K(^Vc zl|}I%%Od%YjjtH(4X{tbw^o<7igas1Ug+e^&1P$ zt7IvC*Q&yxl>_{-doz)Ld1+%y-1>$)cINC!E9u;8Ph}moMcIN#1cru%8Y8{hiG-L! zZYZPrw5lv&fg4NMKb2K`tZ)DN)3WNQvPgLy>9xx(0RJ(8x@S~I0c9y;WwK1qLX?v? 
z*)FZmw$&79wQ)wMi;ZpChHMktWo4^VtnIl33j@|to`H><1^@277<-A?t=%iIUcc{B zTj=yZqw-1|b*lFyH4)%v(G2yjNcD&M%*tP^+EZs(Y;#TJ{<;9)FAKo`f~xCP`Jl__vH$EW79&bTBw!7dI<4kZv+7@=LU?-Qeg4VkPE^)BXg9R_Eb#1$#aUgc;2cY_i z8`L!hy3&I6{7r)9uWO;qRh93LRaa>5*cGN&pGuWq>ee!~QM8NI`p_2q)8QsrJZ%pP z)~MIP0=>T3Q3LDqv}0MS%j^952-vO(Kdks))?oa3 zS?D>d$)F1Y^cSmtg7u+iHLXoBRDCQPm=O!`gq37ME& zrT)<-05(T8j}bvoJ#MZ1lZCA8TfdpvV4%6^H@+THxDoEm+_O z;EycuJHY>hZ-z(b3d}GbX@) z`MSOKg;(vxSDq8Vzf}OgEbwbG91D^G;6EXN|9}0|j>}?yV}lkku32So{`%K!)7Fi$ zqF-kFKR#gp?N7gNcN?3nxTGjprKkLKc63?Wy;ghSmFMkO-h3m#`E|D&?C7DRc4+_6 z5XQHD>l=dWm&)pYb+F8jW&J<-!#}eh{N>+6p;}yETcKs^L&ds*-XD?bqU1KQ2S&F}~NaNbuH`|xL_C?z!puSR8@j1xT zg|ipz;Qk|a`uJ%(D<~fe`i!@Su}k$Yx9;3*&%OApZI>QfcWkkoTr93?EbaZh_TD=m z3gVZ)Q|E2jO47c>Zr!{cApSM$R@sKl8|({TdM)^8M*{p2t{=%)fH5rx_>-@zhv*8- z1_d18pAC1((>PiIdINL`7_Z9J7}$DQVc#WiccF|%WC_eZA)^}~p93}?5|C~IRDViv zd5x@zSLySipx+M!WEW~qu_Q+e0AzuDy~YHYTdfz2`~Jli!SHQ?5vUT>`|^e|+oCZk zU~hos4|Tm!V}Dz)zz@tWkYB@40iXvkuhk-eGFh45BY(ie%x_=O_}hMg=UD${Z0=zI zKdkY9`cTY6u8~#x%^IKEq7S?X|6%F7Q}d_;8V_UcF*ZJCGK|;~;A@O)R4L9i8aHH| z4lT}W9P^IG95H@DKdn^bo*R_r09ngjSwU=`5(6AS;)E+{!#E<)@qFO+m#eK|4jChG z>;|x1)Oa-IggmTQ zIovFGeZA7Lc1f}P#Nxy73fAkuh zc!i9^lxQ(XnhF$YU=a$t%$}{);%3#c2Bi;!e+=T1&K(-d-l4@OE2Q0$viy)2#(3-R zi2>5RD3p;K7BnzE&3N#kEBC@;8u)-ET0F2rXb-zo5GRW^D&(eJT)}0P z=_}Ins_G>>%04IKJ7x02&JbsGFAk{evRDC)H)yaNie%d`f^H zodvcgTA_(#uCtmB=>3)^m;(qYP!Yfp02tW!7pGeSME?~*@T&wL=4(Z7OUIzy6qwi` zxVagaUw(n(0gd0-QW{@I3)nMP8Rg(%(SVK53E?G&iZe^6thP7j(|`)G(h=iM4pP(AtHYA zjb&$m<{EW)v3h($ur=1rcL-7j+;o6{;uVI@>YN7nB?VADR5yh2Q>eh7P6EU zW`u3HViR1k`oNhLOTA#_ zFF=0;@PBlnH2^qR=H~h(SsiayonXO@igTz(K<`)>U!pP%c+Ucu0Pw5+tPm8<^?cPA zRfq7B#2x1SXSM1wX#uLoayOlMw$NKCD3^4f5iop5pfYeZmd$qvo~Bd(r|;j<%TBj_ zeJ88Ui?tG6FEG?!h0h1jh`QD-Fq@Zn(tS(e9M|H8%Yu=Ck^z`Y6$j5U>g$Fj`Fi;+ z3Uv@m=ovh~=Fs5)|AU)JdFQ?#)3f&<4m5@fuV|Nt^;&2}SsGA#M4Qs3^24GN7MoDs z*fECPAsVrFQW)&^fTdX4Na_r$-1{|n1NgREz zKha_r76LH9!mb+Zg28z?3+ZHG4wf0%(Sn2B04T>Sx}n`-!3%K!`cux?73MW9g8S{4 zD#Hu#4Yi90uXe}+LR+XuTh$Ko++eVbg$7r(APr0T)G-z)Y}R5K>JNF30sez(XZwRD zwC&`D0SNTL@&lImKd<-}Yf#~w7Cx{m2MgDzL4-@)q&hKBLi@|&HsF2Q*H2mCPXze2 zK!QOWj-a3J*P#ILw*|i}9AL2?yKf}!LJ~Zr(m!ND9|!oyzbUlm4)Bk!le=&Qo}>Z} z@IOhlQ#USk8Nxd=@90eA)u^w}yDGoUr^uK5i*K z`baE3U^0%h6ziMqf!SUg4VYM9qL2xe7A*u|@`HC6B_;HC!op3(iPa(WVzP)nO$U>0 z`h*?Aj>6!FjprN12OiKTAY z^pquEfkc3RL3e|_^@?Cax4@r#p*=)bz!ex%0igPx9xXnrt+nIFk6UMFrViCEK}syKULFS=RDbS;gY=&^M!8GzrST za_Opo`@?qQ+RXsEZ`--We&gSL$2Q2qe0EN@p!R+CSAX``R(Gq;va_>ou`J?a`93E{ zV@K-0ZWPeI>zN$^uD^WosvS9a#14LPWJ&=4&wl)iP%amiEwmlGv}?eMnvmw+o?dHi z5}c-m2K99fwz#swKL5t&6#q_Jvwn>Y%2Mg|tJmz<;S<5iKkiFpLBF`9I4nM32Y|ax z&DN*IECZ?=&ph|E?fJsXR#CawO3O-Z?XX-0+p^^~0s3F7 z_|nyvMRPx7F%`gV0u`K2k8Q;1oqa1fN z?#CDw<9$HPWxCFMJ>!X3>1SSkk>bS&NQuIy)-aC-0N<{0HpZ^-2T#KQRjbCW>NM8X zscQh~q<^fp0@BN;AU+mGzOZV}xFzFr#Lu{1zZ@|i1tUQZsZL{pjB_%MN1P=}8+l{g z4$Jiz;UIs!iUjyv*7+G*T%>WlLX3AP{5}n+G49A%8)@SlyBuJMlpP278$Vbw2c%|q z&pK(*$(W($YcY7XSULxzhzhe#I!5;a-l$U7Dz(4>0GqxoV}dMbU<{AE5jI9l_!Mb( zfh@&MI+&a8)WU^kjbj46GagG`$UAe)m0B#oSR^}`q8%Ee^$n#Np<%Tc<1AO)d4l9w z^np9(+1muP2g~--sX-scgfZyI0)%L1QVhS`Q97GcCPPKT$H9U8~PIOH9bS&UkiE1h9nRriG*WZ^?dCmL`co0LYz zK*RVa5$Qk!c3U7$^rv4(Ou+hR!Q3`!U^zs;bi-gui^6~nqwc_(z^-7dvQ@CV)`kQF zEQ~ud4^DZ7tGOwEjOVg51^0>%1_s#`fW;x?pE3eJfTeuu72~<^0fbK)V20XzWDrwh z!i?j>*YymtLzgthI&X7@zpDCfP9?}<*q{G`s9f+%6%ey z+)=Jc18E6k$JND<%0JO#^151&+`NA(TCj)*!7|$k3~W;PA{Zg`Deih zW9-}$^!CFQm`w^i0QhBre+1xn3;eU`9?tc&I>0~A0RBS)b+PzO=LQfT=p74s>I;O< z4U^nC+LsJ=-d{PgZ=@NMnh3y?{O7`3P)3EXa_~88C1aFt8xMT6K8d7E})e z_$65yUbQ$+jeUkX1_Jm6Ce{mPY*42jkbd8#4q5arvELE62qcNsWFYx#f)uwcEwEjJ z6<*+B0>IPUPDsT0&S@DW3CD#H>yqo#sVl)e~6cttv9wV4CSAf48UP5 
z@sqZg<^Ajc9Ru34@aCZ6`pNOTAw2349s0a%fjmkFFAF>;u%@q{;f0;W340YD<669C zkY-+z+0}!W5nd>O>R*?&eJp?f@JLgrJI}8!l4Z<-SbB9HkQcCpa7bH|7WbeJ0|brA z`#CKL0B&Yyf#s@myX60>Ec3ssg+)L*%xG-TiVrPs}^xAQaPh77}j;#tPU+~Id!X3mIFFM8K(`QUeN}flLh{hsz1~n z25gq8OtEtetHx>5D96BRZ3=?{f$OSg*JPdlya0Cwd%m($76i0dCE95MkgH7L0;lsr z%U~9}XD|p52C>uzva<|nxu69FwatB@{i4qC0uF3X`*i$9pS}51k^SFaT^?SrZ>h|m z(1M-gY9lG%&#o!b%X!RlD~no=X>rfa<%I#>r@mst3^7>2X(SYR`jV8Evh@D zfoBJx{yu5Q!{|>=my3RHHMB@VCWkr9}9pKzpN91?pvVQP8-i3&Qh=of4 zx)}WEpTuwpKha+l*GO6jlTSQC+(rk#f%w(+2#b}ORLIiA29tJ7x-q%HBo}=>CeP@v z=oM!qEZ)ZmBSRAwj62|uK2VPq39yipJ{#dj{-U^bY9wF0yTukkC}5pu0+4ZpXhM!; ztNR%a{Dq(KzeAqlVZ|IM9}0y0&}U_lGVgZu>qtNOAZ!+P;+IKFzT?5gBJqFcjRFq? z_$w+ZtfZu5-ss>Nbp>33nNgsxPryTKs~tUh)c)}w|Ir#78*Syvm9}%|PJ88*S8U0W zB>~D#288SY|IFOu#8}4y_)ncUYb#eTw`D7q%1V8u70Gh_korMgU0v4H&}4UR*IDh& zTh`g$r7`9-d*+3w?YIB^Z`tZ~t2NFg<2S$j(EjRA|Hf`!uMJUc+_K)Dec@TFs;(3q zFKebtDy?Q&ja|8T#ZJkZKZ^kl?mIjwfd3n^z)zYY3;b{Y>(4`6iz*h`D|=qF9a?~} zV%4$$(qGkrgOkV4*e4(D4?9S_`uUdy;_tTYvclgZVE^Q?llIBpgZAMsKe9?KHhAp| zdu;87)hg@N0p7oI~Z_rWK!!2iLB1^)G$))}8= zy2j203kCU?Em|0KT&Ud%0*K@tyu>1y)Zggq0Dn>?^}t+#S)+gh{Ilj(c|sqefQF00 zF}j-&ogeLuF{3bsg@d>!zW6-UPdt$5I{rBbEAcxzAB}ZI*JG|8gdg`7ogVXB!(Cw< zjYR}uOiyEN`l&In*fisFva6vau>`7qALcwbrohl#}S21eu#mwO!7?LNdx1U0PL{^8L=@* z&c~h0N6ZiNwj9wMH#-T)TVPKZQv~>j`C?2lVpDqMfU#1-OA8@|BxbfAl>%9Grz~`X{>Q(YR_%16g|41voA84?Bh*ErEyyr7!$Z9stlWctT!? zA0v_E4X64)p2r7zM`?_{qx&#Y5#s)$v~!(sBKOgElxGq|xr*J#m_zaqz<4>veSqFI z#vJ0(Ka@enrNiI&SLDQ(qs6`4)D)2fd8Q2y`uscD~mF<>Qhjz;6*^&Soa{-F%6wx z^$>syBY+>E59n}%wim)Z9fQWU0Xrh_7toeTi$&zp=YKbV%7Y=$0Tytl;kuApeZ~v$BxM_voM_M;hr6099fQ z`Izzq#9OU2HVDASdR?~s&~agco8Lf|fSLbtqB;EidoNc7Q1};dfFIx=3w(f(SVg`l z;F8V@F z4U5_Oru_549WN6ssyQQT_B&MmHfbjdz+$XZa-O;bh>s=vE|uLoTKsb8TDxII93U4^ z%fOT1fvQl*c3+BMTxM4BP+o~5>-v${h|96+p?nK=9nb zYIu$O(dp-9fECNPNSj`IcaFA8;+XTt>+8|b;POX+`QeQ{{Z(BP~V)ScD&dwL$w{!!0g-B28??XwF3 z^@HcidOd5fcFz-==jRn3i|WWP>Dm;}OX5T!?JYnX zRSS#$zf}JJ=Hq%>t2n;Ct30%W_>Beum;lrN0pLeFtoH|iU+Gx_z^`io;zxv3J9vEP z;{gBoH-<9e0RQ+pxeHg|Nh;s~|C3aEcEm-K8`@pkxA>9pGn@%-_!a$#JK!ri6&%L@ zN%TKf_=#@AZyY6h`iz55pBRZV?r+3vbYbK~EZFEYeGh)q9|@BadXeE>p8gY`k?SNU zAq?I{B7eAN_aP1zBQjZmKaS|VDE3$E-}t2vUNpf+s_@H164wch_!6Q_3}WPW;&tLo z^h;lih1DYq734s3B@+&NBohBI=M->&f6m=f&yXuHs}#V>dUJEL?b|0S>VNfDcI(zH zTeWJHJ^%dk_LZ-ECH%&cdU|>cESXB?9N?ddddcxS7Qla0*653ii>$D?(DL*1gC%*! z2?k_cv%6asKy`4wbYY1tu3BuHw{Ef*UwvMGml_u9-~IXf_WeJ50Pu6|>%acBupppV z3(@im@+~hvSJ3+zJG}p>9XfE>4t)|?;Qyx82z;+uTCKQxthJ@h{^Sq;)PC@ne;eX0 zQJmlX&%bX0;8&V%Yazj3{K@z2?SF{@{7dBT8{c|U>3Kmgex>}}*Fu4-cKF~?!%9D8 z?&Uo%*=||j-?DwPb#`>xkweGqs6K}e9JLiIm)YyT^17{AyVAkTvrH@u18k33!kP^ENBi9}k zL^SXeck>zlqXS8F@UU zbUiwc8^-_e$Q*7M|I=^A+2Apl%Z*oM?O$cZ5TDxYZQ&r^Y!6h3g|C@LR_i(T?dZ zI9325jPS=p{!xI5VUlLjN;XE)s(g_~@)7BPAId}IH_9XVV$76-9Fi~m#7_kGk)PPf zasHK8_<)J{iQEty`H06FyO%1%^ffs~#)##BGl}`YPn6tH29%eWuh_UU`HA95^hdZv z7~+(JSXqeaNPUU!G0w&s&mKuE4mcDB@r3a7O`7;knIv7Y@$#6qQTXH;KXLi>%MVvz zb|~Ng|LnL=^Es6c@Xs@VABgv;AV{{S$NDhvZKW*Jj~r3!tP^Yz1*mvl&>z->v0%SJ zaATz`anmZ_m9^~SvIN|yZ$R8?L3&uJ2f}~n^t}MoCMt3(}DVVEn1TV*vi$ z0_p*f**XaD@~S}PRzZ{OFhJS>2!V_L{!nA+1b=#>nTCQ z!2MeV3ZIol@L!y63Ht3?JC3tFL*iN17v^bN=JCHWYNi10lM2|HM?H0 zFu-t&%4v&W+jFvtU90l`xQ-6uT1q8U_btT5wb{UyjFFJT{I5s8NzenC4t3HuPh2X2GB`o@e9ub zto^fNz@CjIVb_j!!N!2az|3^?pIKd~=R%(4X-hm_UI70uVjW!msP9j!-s6tAcu^>l zI~EIUR2^iyvqtIizVi7K)lqf@`JCceqwto?jn)z9HlWt*J{;hGaQ_%Yp-!FFP7?p? 
z*xm4Zr=H2HwK%M1QFbUlmsEaO7{khPtmpHr$GZNeC9z#&*!|*!p6j%M>_EfcYH~Z=>)K^4I{2{` zys?lGgWKhGH~Oq3R8*k-lW>=qJUx>b#+_xz46nv2yd+RX2NSRd*ne7$E> zQ{mP%8Ug_f0xDGyf(1~zq9`>eDySeLO7BHLnt*g6K}19ZM0)Qcy@wJY0i}1Ox6pfU z2@sOI<2m2=-uM2vV>kvf2%Ej1vYt8TnrrP~CY@HT{D3n9x=a&$38aSmx--Ti1;RG| z2XbaNVOl)>tPW3k|10^*o>i3o|B$m}9;bjsskbiXUHevEZG8*Y)ACb^foGjX6A`h- z(pa^5!A(00EMh)T?BOtdtORU9{OPm#Gngvan%vAI<#!qIcP;l}KMtYf%$qkE8}2v4 z2|-Qg_b#cGKlBWq4JC!vHfKPcntVe4a8y4bhW2k`jYSMIemC`~#7CB%LrmOR|Jzf@aQ(n?{=iyr@B`RoBE zKkF#DZtt)AMiH4vca@!hSm+WT-8yX{cWD(ESw7NZJ(nsZN_)?1`^(13wjzi?b%bM` zCl}zJ(h%;@T|+8RJS13z3Jt*)vQ6HhMj{qg*$&Z&ZP~-)7zL{=O9sd~^T=!6V;p|x z{z(dX`|8@}>Uk6L$Bl`KNhu*_d%WJpv|ylR8VqDC%bc%gni7Ls81qTZ%p8%SA>6$) zRZ&+uMcy3W8Xe-%Uw?i7I00o}&X_Q4OVwz)V2a5nq?DFrcaPsi%ndi11uO{1*v9zr zn|=rYQ~?!baxyPuSsftX@?&1VD7xu12$74std4ug#_4FVqB%TxGq^c}9?s?XwK@Gl zkz|HJN3U^Pk*~dE-rhe<6_Yxap6wz zE0^ZJ>?&fy%|-AAK@%YsPo;x*C1jRceKPl!*Dj}jl2Ar-2MIe9zHjvquBuL~ri@gQ z7{gooxfXL_7uYw=E%^zT`hb`CUKhWqT~hUt+we=_j(cQaNk6Ayi_iNcmmuZ&6z7fx zOf65qtwhfA94t%|k9>F75`}(j7X%xG&0gYBofrQqxoP`He%O4b>B={vOY+rE1zXpN z?{4`f+g{z=zgm*`Xy)PO={Q>~u(z>r{mlWQS~g(SIzJsVNX){XeB?eVyr6^6xjLn% z41y1p`JL?2oGWDK!>DKl>z*II63aB}mFdOjkyfo$o z(m2`IoWI&==-XE7m3K+?DS8?vDY#c$`ExFt-}~um&)#N^gy~L%$(%uc(;?E`njh}exwf{X4#X&|7}oJK1lL4_4&j|YUZp-%3oHLib9qwW~nZKERfOgPE3)k>IDpTr{%o_%~HF7>{H%cIa_ z&%pLPtLkJUm%{KetL9ItCF5v*Ppu#3uB23vZB1Bxd4;N_s21~x*jjFWKXnsnqn>ud z;-(#D=fTR04`)bqL8~KYSdMN}XhV)$=>5jUK~dCEWB)z<%o!LGro;86 zTEB-gKZn{JZ@qTYs91MM!UAy-RddsG4C~!*HU98#i2S~C zrO{aYl)atV^=p2rq*IwWm8W7yicMe=oY=(UcXZLzi3b(g>Ir0RiC0R+EFNzH`?fCXB6hwxd z_Q6IE)`so7;_pyRlZPtnEp3MjU6Jr8$LVW-r2zV}`bSq^jA8d#%wTJyZvYz`Tkh}1 zSnYL9K89nRsiI#Zm0z3K8ZIF2o2D`Drie4+p#7&AKjghA*#9=yiLJKuPyFk~5e1bT}Q#FeBi-9v&Ru}S6#vn4B zA4HT*9(%K9@razve(BPP`*jd;>3K@b4D&{Nb$T;2_-kV~_30_s@ZUn#e~Ms;KFVi_ zVrt^BTRT+yNVq$CQ+Jd6P>XOCnF6859B$n8cYFOTR{^rEzB^R4*YW~+YZXK`r(ZHM z@RLpjgf*9G0WbE)tdxU5V;JTdLjSlq?Xj8n_MEA2#$I?w%SH6lv*z!)=*4RkWY&Qh z>2GwOUu*~)dF{&ZIJD2n`SNbewA~+{TSZ|HAEtg?58rtd1jao}n3o(AVto8af) zc6b6BvRY44k+qn67KN}R`5`Ni+H z0)-{GDfCrpBI_vWF@PE_ViBs*WqxN$RE$M~8x(yTz=(mT$_Mugw&bb4z&dV8^m=6| z67Tz!>#Q|<-P9_RrRf8o?%bW;5zuWk5)nSdKf>QV<3Te^o_F>}k~tW@h_Fr5RRu=P zkAov@W?X14nH`)Q-D#R3irx?2N{WF*OBSH+Z*Q^d-U~%BLfKU!>mTTLwTx}G?@oZA zzVFkz#Lj%D6)+Oq4u4m2k3>yH0DZ!F7G9w}Y{xMskxzbyT#mtu4P)ik;C%@4o#&0bacxeCRKA1uAH&putCb2asAA5IY} z%ZAjJDbf~ma9*4~tWe`m3!PWdMj_WC*@!7NJ8dZ#V=K1YFyqmB37sN3zzDfLrBOZg z^0!(11^r*Z`x+9gvo^n9p*5K*I{u)8d{_Ldz!|1nxAbP4zJbw`wv_e?Z7tn^aYl{5 z<>~2IIOy5pr& z38qtO6%(ahG5xf*>dvpO#tZO+05_|&_NGTs@)ISRonoR)`UO%na9A$mXpxn|LGU51 zN{aJ)_2LicRiTBTCa{Q9hALSHBx^#FiF)mZ)X8>kOOJeUhLzSb{xIT^h=>I`r?maK zL}tjLl%4-CD;j$?+1CdBW=I$Jn)O5zFLpLN!|0+WYX7%IUP_Y)8^bnwEIc(p!Y6%Z zG+++duPW0~uEb;mYK5oPxIgx^K;L&OG}XSCOn0F71?vLoO*yoU87l%WEPR}8l6U=5 zgGz+lg-WVg+<@gqo0`%D+goo{k)Db2f~LiL%PjHjAB&Zk8ThZj*VoG)Z0dafot`eM zC-)*-D&>)sY=w$-8kYPrOPW9BVCMu7udLQ^%aRk00O`z|MWDfwc|hQh%)_*7LaV@q z*a$5^!4NE{YFPrl4T1u=n|j0Ck6Zw}w27VI;^Hz1Mx4Y{T~m^cwYAE&H{hE8FkQ^^v>1zdfAo@y_!>;`Ugu>ekk?X15oAXM_bXnG zX=8LdNii6rK+fvXuV?4lKR6r(91Fm6!2@sCUkC6Jah+Gu3%@0Aoo#WSJP{W>gd29O zKH}O6-IBw`&ibf*Wwd?aZ0#|Eq94N@bZVk()|yQkU_gWg35J^akrREEaP^irV)t-? 
z9>Y1IidMfJw#9Lx$1Uimo2o05Q&SFxNt067GK8e#xCD)A+J!pul2xzBu>)3+*z**p z=J`?im%umgAO=+bzPvF2SdSHv-2>cAQHMOi+sf?aj!&F~aq_Wutp9#sjoNaY7!%l#0F`3=3H+KDe?51?U#B(vwDiA7S zZ{EZke9UUg)Ll^N?( zDH3K}H-1$|K@{5*shj_nr3J~JWALi3u@tT-&hQen>}Ru8zCRApooPP4$KILKGOBYX zENb0jvJQsyOtoNV0NrL$aJ?-PYt5%FXT|keumsudaB<)@`vs7&ruMiu9ks}%qqzhx zHJ$ZHjl$#4&j*y;$k9kWn5RVBwY zeRU*AzsODybe08H`j@Ah&1@c}6gu^Et5+gKW(iRPgpdK&RF`Gtc0jp9v`;|QzX(f+ z0|*h}J9$_FB6V;SjdcKtNvmX>eF7w3T({;AyR~Zk&TS@QCc`^*b@gG+S2c@o%$i}m zKuyi5D2N272O-{kc0+1m{CSXJymD_&6vi9_@hw!o)xSTUma=u^KL&l!*GJ#EGf@F2 zPa(=MCu3#+lai8#Mla9lKFhTqTwTzr&b%dJckem|hS&}wQg5eJZV}xs zO&95!1q%yaC@8O_Q9G#+AiNf@W|2S77?eCHJ2 z!RV+BK>mu&ShCLD<{t_Yd~;<~&tDmUIuyU~Oe%fkr29CyBSsaVxT8a862~d-14b55 zub2~9{&6be%PaCjrC3mH=EYw>9)aeA%Gu~;x>-h;^Ph}uwQ+f3OsrBr>;}Sgt@K4+BvN$73fHDEiGyo#Ha zjuLO@i!Mlb&ASD?x8$h609n=itp94Nm}JBoZREcuCRGuYF5p@&`AcZ~Nr~ zVeN&mNrqn>RoQYZOs>Js3Cm$#s;B9-d$rWLJsqmdLz`5yRJa`u6H{;y%(U?#Te0p+ z3;f)zRJjAtb&pKvc6l2<-u|9$_RFFJ_p2?>Q<+G*M##DEGvT-l~!oUt+^ zKWk(};PaQY4-y_hO)vYCc>6sY9sllq3pgvM4W088zy;ZS-MyIrls1yrC)3ql>_9EQ zIJf#_U)uRrc;R;UM=|!4Uajp3@V6okta{x1G}p$%=yr}Cd!nf!d_bcDE-Hi@Xc4RM zjD(vUnnk%xpI(SkYtK4s25~Qc9qF_B!mgpi|CX}M$!jSa@#0wZ^#L|O6|{ZK)Lzp6 zxQ>1!c{Y19{4rCK*2!0Rlc;0gM=maj)$Tl}U)Z`Ar9LJ%X3s8@YKxhed$Uh-70o)$2 z(#XtQ!!1&S4;`crh7u>Qavv@hAqJz140tD|>}ze^9VSaKCkcph1c^-Iw( zCh8P%9Ed=PZCp^ScY~Mxm2$*gxo(W^=e;Mua4h(nE{|w@dK}~LN`g`md`eq!Lt|X* z7Y2s!=mmffyBY>B?H)BfLT&w4U-x&1Gl9s}e>uQd@*3{wPF`*fFgCPrAq(5}&Ce#m z8&(j3y(^exl%RnP%lwhZ1kVGUmk(z0igYy=Yx)SMw-TENRd1QF5 zu|@u!GY*u)!(D}KRB)meVYwUKeSiQ#+L%W*28Qz9fn&wptCl9tdH$fCk0rFxk*Xip z5SHJoxTo}o$P&}fgZMp7(x{FtS3V`eU%@?-Sb%CYO>{`4bh~L$JEN@Tvehr{NL;bb zL!|vH+8e-IxfG8EAkrZX&1nXjT;_D9FY9w6N=h#Zqg6k@cq~nyQuF9ySSVF>w(rb? zNB0zK2jX6Ay<#RX=0B@`zR&UUiu#`?b*WQS_cb|`;qV7stjwmXWDi4gQ?<^QGQILM z`+1?7s4sM_9HNb8ya7*syln)#*WU zMr{uYz^X0v<{C${(q|Q)b3YnQpQ4d4UO?`ujix3Z&^w1kc~qRQp{;_A!K2Q%pG|q8 z>{PUk9J!GZX`H`7rvfL5W~UXTGHJwIKe>Fiee%~W4(x~OG@fhn7i*qNhvk2NR2|&z zWR17hXQeZBo)*uPjyzGXEXoYK{UVTdfm!%SB$K*f$&+Pc`@Z}Y#y?3}GM^K4WXwEg zJkM9`1=-8(DAUc5HEVK!>Cf8kCV2J3Nu z)(v2q{n&UPPDL|~GnJ%^Y+IFBfNz+ctZ(R$lcx1V2NR3?Nx-YD2`dlpRZV1%|A zE;6dL@1JYZHY#3?7iu6bptNleTS?3t<-5)#)w4iW8AN&uY{|?k0i{`fYQX;<`i2d# z#cEK%xyF&a^Z(kts+8TU>J{ek5!z{tBzN0)y!9PsXP6dG5d@J{;yB|%2IF>AK0mAs z!kiK5bDZ4@|FVB|m&HfWD46&VXAnH`2p6=eOy=DXKbAcfIIGZyW8QQ*%gFQ4!je6(&s>sh5B!`s_%Cl=zairS<(2;*MhO_a_2kMs<&=X zS&zee9lJu@*H;7PqKBPYQHx0>?THP=XetJnz{T~>+z$D$P56zVYeDrHwtenqZ?E3% zyE}Zvy$_!;G51-09)A3M|Mym(X~>e;&Ccujfk#iy!go2ULuNzjGl~hJ6Ay7g4i^xD z$3ebxf$H;O$DxJFIE|4$+=n?S85l4e>QT5%&6I~ojM&y4{rac+4dTWe0`Yeyi{=TqTxe8ro?~nReq!IG7-wgt2G|OD=_xds67*F>i}}F4Jy1j$foB z4C0MaZ#I`SO9CgBzw3#2mndLtOFr#NAuGN+<>_ecEH*w})ZTy-V{n!HAID0tuxiYQ znXgImu434eEzF)@RFrr*h5rgM;0K@O7StCutU3(q0~SAuSO1u1e2HVe1w`8V10u;* zRBjZJ_UR7yQfgUhZsrZ;m*dEA-(dxOGj9hA7B@W3fjy!2W*#LIm35~%u;f~H0l5>R z*f{@}6r%C)11dKVVW9_C;pcb1$&gLq^;4z1{H2?U#0 zHQdP*D*J!gYjPsxz?UwN|JkBxk_;wty+FGNf{0O0NKF29VVxKrs!m?@p6azV)7r6z z_iIat@b)d8P363VWf(pPq3qBi10Skx^_g{#{MQt|23PvMblbV3gnV@CsSZwq>#;F+($BtO*dhMmUy=m*h6gVa zQ)k<|x#CiY?squrP<9||ToDB(m`g1RCR?u>Ucn+38gEh&1MR&*gg_C&1fC15Gjpr( zoy==J3(L3mi_62@l%c$f(Y7qEVQA$5C7lR)-n*TFM+w$ODoZ*J%GvOnm@wy*WijZS zsm-^-?Do4W19V7CwLR9s%KkRx0F&->X#xQA!A@BrRuv#e7`LRp#$Pj+sbqazqu~Q) zSy>DP#lP-NgqFl-$5pBGW>X|kdc!MqN`S_pT#|aFJbt#U6pQ)mwmx-! zq8wE|+O)^9QS3%t<-GHCvQ~pT_2dKbS;F)Cz+J#0ovTpX3hCOmwAEIcK80jh` zrstRyN;F&4M@RoItry^kKYdOMiq33?=`On?$!$Y|?a|akbuE{$E;bc5$RaPcXB-_$ z(B?eZ1*8Z8Qjzd>EcDarAGlE(Z$av@=GDORUFDuHTqK|{pd6gyyE#2+JCOM(5lKf! 
z%+q$|IhD19MWR+=qAE+GCR^giH%u=|g(lo&Qbw2xX9bmlAR@&~h_+HL5*Uri3 z?-}6@6IZZrzlCB1wruVAgvELCg(EP3wE@8mL)lIScPRY>Z4jKHZW2HnQ0x>1wymU` zT<`4@N=R--MpqoQC}G-S#9g4Q@$1*))6;0VRZStf?-l|D)qveBc;>?kyryLEuv9+9 z1$IPtq|*98;i=2q;`Sb$+-vvS);?ys&~Mz)<`(JArqU?_-$QlF`(N_Sjz-u}p~f2%>#e)aG(d z3PHPmh9}^QQ+rYt5j*)tJP90>ozQ^ zesp7t8|XoY(3}1WABk}viB%kZSU94RhpFgCw8mYPzl7-y!?f z*YdyvZm{T}gWmCfE?AodZjWCLA&`OZ?{iB?O1f?g5J_DwxGN7Fx=7h8II^h&Ib0#( z5wyWW$AN4?y9_|omuA79t{`J%AtY7OkcC*t4o@Ou+Nx?MviOBA~UGA;V$gvRAS zjotVU8#YN-#x0ne1)f6&xsYJz9uV^3EsYi^$+A>)qgJ^8!$syILfE}Sf#8=6&Q5X) zhawV71Rpt0@M{)+j8ta+OhMm2JdB(F19vTdK4CLkl{8DWD=cdoEvEaax-q%AXFh|< z(wbZ+le%$D3yk7xxP8|j`iE5KBLQOCNK26qdxkH3jcf1rjv-U=Ia^d4&CQaoDm+e1 zY6iD$h8uM5`PTlMN;0m!aMUl$Ql-G>-NxC|#Tx8~4@@(tm_5%C0O{Nu`a_nW3I-kG zqXZUzxkCpa0Pw6!f4ilV3e@;ceeJ5=TB|0j6>!ZD=$XEaGUnYS!s0cza6<3D@LU_N z3847ZNZ;X)5tQ1+PZb zku=sz0zjP6u(|&Vl?{VkjXJNLKgYSMIGP18Eq~a6_!kGNW5LT@D1^`%x$=~a79EzOhy>ZLQRIa)e?Hk&s~Hh0EH?&4^vyLrWazYe2#M8vNf;{RbIG)T!kal->i}@Tem_qJ7?>5+FJ6Bd1(C#+7eEVC?x@qcvrUFVJ8arIdAv+qP@RP|U`O-llhnvtz))tf;aeLf%iNridwB(~9`Xl{s zLx_%IY}YcYU_B`@W~*c4hmnsFRsa#8ehd(S3~>EOsWK|WVjpKp(?1M(VTK@uT;u{9 z@>KpygyRFx{r<&PF_Te;=H(D|&1d6w%$^JeZjpTF-qT&D8fuq_ z^PG`9r;C!vFFUoMqXRV%BE9b=`Cg=Ah|Gbvk_JM>OrG;*vYgz>wrGns@@AnqyzeRRD()(;%06*dnvbmv( z$Maf0pEV_t4ut{Brts*Czx_s&*EJ}-(zGjSy;!ja0*tqxOIw~PJ!~3@Vk)$|>+w;G z^gAl#Us+g$?Cp;PdaILqdJp-613Gabq0q}^2>sSbcK7w^*7)RP2ypO*+-@|dH;RNE za3sI1*FEasd`KOSNl%D^Qub{XnRyrDHmg30Xm@o? zD~B%8&}E<$wEyfI2cBWb`-=zclkobNaOWuP_4e8L6%1Y)p?~DG^nYRW16$o(9nb;x z%lBL;rNJn`JMT9rnX&ezf*`kzknT|kLU7oRfMJ=rkM||qtPte>SG&l@$<1p`278C^ zrX^!+VTB}wj4A#pUQC4$$M=nvxnwD_XQr{q=k<*%N1cz}Iy#VR+%c^*8kVOXj>!Ji z>~vzubNtl)bj55+wM2wTAZ~vlU~0N$Y+)R=wcBV)`deXu4l2w=U3*#KlLaxu|MCJ5 z5ev4<^`m3g&JLg53VTgPq{h(e;&ZhV&Ylhaz&+CJaNYP zyj$x-ii7ZxmymaC77!#YSlsb0J29nu+ygc*28xnD6T~x1iN@orfZmLdw!GdOAD@Rp z4L4?=ZcI_Zri&h#?T6}L3id1jMGw)~1tmZT`g8e{;cj3V#6}k871BAD<$l1c4ZrCt zL=hF~R`UJNf~8+2%Zlv$mxq;Eruv23VEk`YSmX8!)Qs(EMHFS*gbyf z?8%RYt~v}mwUo#SQ>;XYpGgsk7B=bnhBYS2;V_ALHWAq6%7%@MXbF{|0TAQe*%++$ z?V)xkMMs6%H%%P#5wB8)S`vj4t6976EmkOa|E!SL_(}8OsYTn#u2I|3S`5?5W%?Vr zlI)VN>>2M$JwV1=FVmoAp+>bjMC)c<5}v5Y+gu=0@000M3P0B$07Xe{-nU&jj+Lt2 zwW|H$V!bC{U#Ja_X!N@}p$bkoBYxFW9GG;B2<%ZsAUW0Uo()%XexjM= z$?n@YNaj%Nc#>QS$(WWj+TAMzA>Zv|l@*d2IC2?zJL)2Ppis2_cq1Fnmfxe7)uW0{ z^|!kQ_k)ySfRk}OuLYW40>pgbHGv(58K(8l+iG^Z<6Kkm*v1iDDpZ)0wAMfA`?VU7 z5i7n9RRh%K`^SGtvFcs8nPAQ|cvavz*WLXxQrecLUA0~c} zq-$

x`zSod$LmzH*TS^FHo54ib?<4l{a03p)ZdI5a6o4Nf>FGH6PE|3!oQxDJtMm1iUU;2-*z{FBj)}mwg!GkgmXtFB204kED3&t`8Hec zIVe!rr|3MyahHjvF6|h0Kc{_^OpkO4sGd10Tm=*|bQV+7WB?`{nDQh3vY4Zfl2O0? zqJxQpgF```+w-eO$d(Z9{~YCF-u9P&oMq5_6jk|~*S;Tn@GdwS1@&TM9&*u)h8)#k z@2;kPF1wW~Ykuwd28;cg;P#2_*eipOoJz0y#_xmbZlYNuYpHJgv6bX}_duTf6axJ| z`%v7f0mbbm^U<6g{rVyDz}Z!FSHAWfjnpL0QnQPbXxmG6pDQKd44YA{T@@9H{oW@7 zoC7>-Q6PiRn#z*}6>s&gsQz5lHWd#Q&81dkAWTGC2B?o#yaBK(A{xL7{UqMF2SQ3x z2|g@80YYl6+O@G%7l38%juh_J$YF5sJW-h}{x9ijz)U00NjFpHPDdKjpe1A#@yFBZ zdS+bt@uKlsUA1zbOLZMxal%D@K|3II#C`edOw0#9yEViEA$o^-c=9GB-riR-pnIHE zqg8eDLr8dKklZ6ux3NS=tw4uER`rGjl`+p#YZ~*4wF+&{SJj)|ZPYA`S11%nY?3}( zDzY3ybwIW<;?oac07&6IvyLx{Weo>Y5=Wn%Vw)_9GK+%(i=E2E67*9AoqmKzN$*+* z_KTM+^787NDU5oGKz8y4HNQu1Hlgw*Iw7CXZ93KUAyOz{4q?;AlbFvKpPxc$971ai za7}bQ9Tx!q!()@7Uc11nBO#Fnu0SUiLvE5^r9lwv3;5!E27aXCHSns|PmkHx077%p zmG~hEgm_v!j9C^7oiPI*vdK2*67PmUHCylzV>Erc58<+;F!aM*O~v=`UjIc@fF%%+ zvnG640)X!VRUM3ZoAL?PzF5$xERx1P4Edb`M)9-p5s&P?Q|?y{Jcn0%ztT4XtZwEa zHcNU)Gg%oyh-%EMRw|Aah-!qhs`)w~L%t9O1@%J~yDbIeu7G~6;t-~rx)lUM)S7?0dBCO{l{^8I z?Sv5Q&a=-MIh`9*qz9VHt_Jv1-kIIIWnt4h$$=Y|>@D_^lPS8knrJ{lDMFaoQe(c` zNAqIb_bao!7*lu2(9hK|IgAU>M&>aN5MJ(7-2BN`K%Hd>a%9l4Z`^cyAzJuCm4&!( z-oea_J|i~0{DG$QE*Xm-nC`fE0o!yI*P^%CN8q8zdY~g4Xt%D?~90w zD*RHsDA4X+N2DA597P2enRaU?W zs0a;FEcfHJD*SCsnF^fu!_-UCW<|_5scL8Q!B+*MOYrilcKuZL|9tXpD2fHTq0ANw z*@p1!W8WHj{g}^%_+;MF_Gi@+<~6C`G8!T(c>Te^y_P?q_fXV zztWz&%!ouo&B!K6Pg0V{+B1lOSpoYW0%nq`z=lm(0Ys^5%@^O$S;sdt3%U*9^wO;a zyflA>*8%T4Fzd?2Z5k%FG#k{_+ zb^$XA-2*<4u)e7Sf9KWl=NKT6en!funzuhxIM}GLk;NY{6Mwq&w}dLxf?_kDy3@CE z`%CqTXJQ;)L4Y5Qjv$|X$UL6G`HdBfd9v7jCmn_^+4$)EGU8!het;{rSytcgh7{Yt zBQG3YHaY9V^qOG8terXlAaZ6yjXj?Zz+xk>CXe|(>NT#aYObaSK@GpZedeymeAm!W z-($9e_TwxGP6upT*fRlHpGK@2|Am9!7bwJ0`K(3+cmcDviuA8F8R%jdsp3`;Jv0ZJ!&W>7G1nO%ezl{D z`N!1?Hz|Qj$<)w#Z(o~`;sO%@dHGnRVN}=tWa2gTpRs>N3L}h!<}BZm$HauZv5$hu zbMt(NV-0k=HX0B6(USzPWCNO2Z=gWAR>`Bx7MMWvIVY#D-S6^fJVn?4=$yh*U)KL; zd-Xg`Eq(^sRbyLIq3M*20}H9*TQ87`q=`@Iasp5ip5gD41GA~;e^QG>h!Wv+O#jgT zG_EmA7+-Q^Bvdv&h)ojzyPwNN4@!>s7+U5}^z!MJg*d5ywyF*D1D`}9?ynBKVKJ9W zexsVfEsF=WQz~=s$&gnG$VfBfpIKn4%Fgvq%B0fz`5I1c!y8-Hj$xt{ev53!ij&`50p;MKH+35p}X3u53+X0{u%WT2tnrIAFd38||*# z9oX`oGHvQ0>VN0I!kV-!Y6bOpQEkuFpLQR{fZDCHvNDe0^*Aru9asKp7#`42fIO<+ zZtw@6_2;qNWn^dePEFw+X!g2VaiU)Nnd6;B{p!Jr!PI)|3-e!~BnBXvj=`Lb@}!R# zB0~4f_CiK#&WPr)wBOt=#Bj~|S;eS9*6_lkIU6zn5;+s#6J92qDbJXam&;$d&94v{Av5_m>6xs_`RVH(Ssk zYhDa!az_ZUYGAhFjM$_RSP~&Rq9>FLwpUDaDtMSF;o!ev51PgTLIQ-CrUWWX33^l3 zpg2OHkOur$(W5c1>>_?1v)d)C$ac7apXZmpa8wYd0Sd&t0X~kEEx~GL;8`;yMN$vl zGLr!HlSrI-2R@?)if|6fw;c2xzSnV>!Ks(tUn41=00}FMWMHdyqC|_4Ix|d|KLW*e z{NRVBnKOojab^!O48yC{c(L$pRaeZ%AIw2-fQF-MVHlt!9k#4l1z&Omecrb(((jRx zk_|;3ijwo?UUp7Pp~NgUsJNS0%zgY&!?I$A8NcpROl@~L8-s=jAb5II_gvq?rf*{( z8^nrpM}DBS)~FzleMPSEd@tEw0OY_Hy#808X3t~oEXqnuK+N3#?dv#TH6DmL9O!ed zu3CKJK7!pIv{KN}2(7=^{6!QB#9A#2r3ltbdQ?YtUAS4m<_JwPlz8m;y)>f#u%l}k zQKcPw9FtY`NLWOYTVImVF%iIcYOwMlOEo&6P6FFp0$3n}t?qJ`Y9gQ%+V@3;#V2X} zu1XxuXA?nFWIr02v$`4|QdzmY)ip3^B(mnZDTG3lgxac6AKIEIWbLUDP<_)6!=s~C z5)?hz$7l;w1kA5zLo(obMCpcK>u?Li3yr+{jvI}E<9wt@v}`K%oFa;t-?Yx#ax zWB98{$+33j*X&7m3z zS@$0ou)CNe&0k4T53>NTe}rW0%`aO!yW=11xxgO&;|u^i8xA=JP*(D;MabTL@jY~F z2I~2dw8wft)8I%90B8EGPHh&QIB?+o52d@c*<1^O)gI8Uh3g0Nl#rItIB7hIL7SW6 zD5GCp|Bw`boqs84r^O3mBVu~}ljcv~vqER$5>ueW9Y;*=4-0kN?K9)XjaYWghkR~8 z6^V(w-Tcr1G_K3cIa zXP8{~_8V3~`*-w^_P=kPo#b4*ru2(`QQ$tXpc4EPP@oJ3i_SZ+TXPmA!_D1^r$X=8pF%BhH@QdP2n)9QpT?=7l`Z8DD*eTWtBp0dM5kru%1J&rcuS?lz1fnC* z!^`7fwK&M=VfugtB*CAq$#%Tb`N*W%4J}Dkk<4(qprD{T@^boP`p+V(yG-)*F%X0` zMQZXHs5KWlqXCovw?j$d3_(8$uTHvbvVeKK1RP-Ehw*NSlG zH$WG|`y2h=zm9KlALeW1bIyS3TzNccTd~Sy5327-;lOh 
zj*b1j;Gr5T?K|#bmpf+0`lcq=XTJgANe-gIX!YB|2Y7Gr>SsJXVMh+bH`PB56npuz z7jLpBl(1`u&xP0ytM^w7uKv3diz=HNDLg?cYu z(*aS?k29ssQJDXY>X2->V4A5>lz%C@+gRfjdsfV6K5y`70Uv?(Xg+!3n`FxVr{|1a}J-ToRJtZUF{| z-~=bY;1JwxfPHws-L2i)+N$}-!0^nyeQ)>ab50A^#OYb2bzS}}|D)}iE}+!T5zeSf z#6@Y_oz-PsUteEPC=+Kf@!vfph&$2vm6LXKFs$kdn}aAe>&3gWWDzuwW zIoGk(UUV+;SiW&dx_-%kqVSXIG5zism)B>0KOz%87|E` zuNdQ|gv%y5PBe$}7XGt62paA7cr3TPv8K&P;89c7DF$k}S^@Jqv4e;=Q@L6`#!C?* zAE)xE!1#+Dv=(-1+D9TK@Zk~v80B%_o?Naqx=Fr@-qe3<>5Zqc)kYE)SkjH1o8C_#FN1-)SN~In70xnl;!ozMPt?fAdn2 zn8qD$EioRZrPXDiYQ%Ap_&&&=Lyhu4*07A76PuqD)X8IClLy_6guH@Y6xK@#5M!)& zXXLNX-iUw8y-%tZ_)Xrax3fbor48ju6JY0?Gs|xa;VV^NpB}G1@3fDB9qzPv**PncW#9F#uAR_ z2!Bc)z^=lyLy!dWpcm{PjFW+j`Zmm4eZoPbst{O|dc;pOF3{M-8_<%2)kmk|(h5 z>8nY>iQS2{uJDios=@f|PIvSzF!Y*G?r1M>F)gwKl1M^<6c$GLy zwcbSj=}3f~P@swvg3i5ojY%Hplp3yV>YX*2V#AVDiwE$POr5Z}Bs^Y}CSX9f+`HCX z2_8-WE&@;((EzE!abdTm&Z`12kXoC9!lCp@P%Qk+tgNohO?pX+FTF~0jJUu3ajH(E zPtn^PU)W?!yJ0y=C}&vaS z(F*Z^>*aNkuyL($qrMwz#TdlS&N4oNFCH1Y-);ZOSjFZeib$Jgb&GV9=E%C5Y`o~+n)`Ru6cr9)xQ8yqZhalKhtfILwyTqX##vCA5Fo47VaslO?SQz|i& zcaPTS8?_S4cTa++ue;VyVh9CF>0brW&aVIxSPxr^4W32s9*syqq=-}9 z=T_82`QU{4y{by6DB)}RBr~BILZz-pzaiUv zSe6u*wK@h!OP)fMtxnX`4+aSeE(kV?nqH8Yf5AV8uX-@{=fCK1W|5RQjw@|74(ix?`4^@`xtcpCX5F-lMYnDeMp`% zjhy2p=27Db-PwlFHEGbj<#c&l8G@H&zMc6Ww5OE-a3&kxJm>$S?XYOn_qnt(uOZpq z**V?1#D#C~w`ImH;QUCgfL8FL+Tjn6oISiN!~ZWxrNM1+!3h<1DaEPP4Az!YU5R}6 z2*S)np?CN9KKj;~M*m@-QqR@@c#I@~i_9_>SQvx{fYw||YHW=Jg?{%~`4m_Azi=8o zbU>D-0=8LOp0sGLB5zjj+}!bcta6i^KWF0odwmQi;?Phvu7OcOz3y8}f`xf)s2s&kr{{7|Kx?7kn_6C?@-cg$F_Ae=yoL^;6a{B)-U^?=sirI@eHLHN)XF9cBTjnj~~JZ>ELp6CY>dp4-@aOK!nFk-7=Q)RYm53%WM5_SJB!Gx4-YQ zMg<>zE}#vhF7@s#=bYYkmKT^chUc=*B8MCOu2=tH^d7^v*!OXhCr88!154lXyOIdN zf!Qk2Z+{PzneA4c93pcOTHc!S?mn9!PR!;0Uk!vmzF7o&e&QfnV2n(F{8oXz&MtBg zn9H~`9d^k|wg2?zripe%tD}+jpaKTv9+sE>XI%GFUzdO2Q}qZ&;c{+oozmUp@(Ltl zn1gMy1+87Bo)iOBn*rpsn#L%q&7AX!9tQ<7F*g^vy=_fL`__T@`I}f$mqw1cXy);x zFbzCy$qM_`3RCPqikvyjP-JRiUIxe>asPe)ufbDSzrUvP?j^||b`$dVx~x4Dd<rIK~a8aaR+G!WRl|HZ3M;S^TYmylujqqpRn zK0DFJ&4qi$C?;-_fIK1zT1ivT?#JgIyx9%mEqpD$ zblAH`iLI6=9$l=I81o(@Yj!4@>wgr5|L~+@#9_x|C>HL7H^6rmgXB-9NQdL1gv(~; z=MS`ucl1Z*=O_)%#6~qy5s`}3k(|RROK%eNC^i07*|c zas6epRcr7w{C#d6Z)t9emNct)?B2_KT5-35IKx)>sO@49*~+$wo|88&mp+3XdrK}$ z{(!~DUt^OF%H!fkO|??5TepH}GVn3wh#GIjs>dWUGZoSY1K6VyOK zR#K#G3|_#so1U`xb#FR`l_aNY@Z=*#WN@-22k(*2Je-1-9Cx_97LkAgsJEou##)<2 zUF|aO$iHl_;yXdLN_U1@oa?l%Q9G4;*p}|iUv~53y+L@XEcR0UW#V?}XCbE>`}waY z`GP7dO-m?)a%LfCN+_1)N|! zfWZJ{RsqNaGG7m#&qcT$bvwlkIAwB8gS?5$y_dmnm1-M{yV@!@r|_RMza(faq+~R0 z_{~QS!_r8~0vyU7Y*XQ|raY;z*;$>s^U2z#Qvg>1dLsBCTm3x45-R}XK#-8Jm?nf3 z09pYe_dBim5k}N<8S2y_>Rh#rKk8C_RF5af7&h5FDGs5d@6coA`p0&GMDTyFS#>kF zu^-gydp8HdlmQQ*?MSX-YNF-#5Vd*=f6T{9xoVO2>hDqc=Wq9Il=JuLsV(-h!Vb){ zgQYkX)hvWg@5;C~;)PbEot!wtDaS>77BYn>UIM9dJWuhl3!sjBT2k@-h5_-!VP|5? 
zu3f-jWMp*PA2U`dY0q@KpgqQbvgWRm)!_A0ZSg{_aj;S6%ZU~wJCJm6ISsw2bso3m zGd>S(Z~3uD&1+D#e41q2x>3n!$JS$Z-!pZ%BpD%(0l>bXM`5il&7yN?0IQCt6wqEaG7?7b8#85vdz}%?=>W%_PJ!bL1;Zl>^`L=4n z2Y*oh3TltR5;(g`pA}=_FwOu;lKuI#elAfY!QXN@TFX7b0E3$3@gkqwXNS$Bsh8vRA{^ixl0XfGrHKC$uP19 zRQAp%kh$}ZFQ9~!nzKg-dcgKYkmX*0m5DZh3tJ9^Ay>tT@c*a<4WC2*yEr(|& zVNbj$#p%};L+syrSmg2aai@1KeIEDvE{37HsO^;AufzUW%e>7+k55U^W{i|)ja{%M zL&s|xEE6QCiuh3!6dQHzuq;@AW5sJqguL60I67>^A8r{z8z`eKp(ghF?8UzM&o8|g zH z^zQgObO{&u0t(xQOg6wcsKtZ!Tr`7uVP^mOhl)bemZUe zA~|GTbGe`Z&TN#!L*bQ;fwe1xI;a_EO7~Y}+`m}~FZXX>0{XFLd%snhHZ)H}ao=xM zl$W&yG-+e6P4JqpsN?+f2MgNJGZj#`ert#naK~{l6rjIFqrDp(O)OZz?TN->YR!JI zwRI$5gR6NbG=7#uU9Ca*6AUb@qK=LglK6>^jsG`W`a?9)O$OfDGv#=>ODi!^>d7GC zyqThQ!+9Ppqf^xpPjHC5hIdLz6#swK=YP>p4iX?q8>Yw2Chh7TB#bk?`RezslQW2q z>^heS^u$ur{e`QUzAtNFpWj!z21JNe`UWk8AXBbIa~SZz-?l z{T3kHFExtt8=kv*L^LKr_Fz$0Dh#y^6Yltz9z?y4@zt{ph5f%dI`YZv5tqOB5U@hH zFm*qFvlzK$*qc7zPLfKz)ptL}JI3z$<1xJ4Htn+a@y+x;BB_j@o2i7!L6wqjDscrS zWle!8ph$3u%hAzeAAqnMCIr7s!BH`Y*9q8aT?YQcOJ$81mwkgIb zC4csh4>o;E`xLu=uT`FCqP$1&0B_1!tI5>;SY>XHspNxh`_pTEMov|@t_t^jjh=C5 z+dIDkfKoo2+*5XN?b%H@=FDtyOUeq)c4^Urx#q*3a zs2DSWX5h|q{G3}s2>=ZO>UY@dM3>5yUZx$cbsg?VF?5~HT(9b8_!KPl5E{#uypNgk z_`jRDF+|#g@fX@&CVRJ9(M(qFxsv}Wye5#JpP%IYfgpW|=6`V?1JU|H(7ndpal8Ez zzTf%CW^HXfvAi6=F_5^im0_x=q!dXK@A|n94FJ^Hh!D7*k19l<=FJC_xjg3x+@24j zm8NYGT*u)jm#(k8s~r_r7{GyL6gt^*%LeNaMH?b8By!+T zyQUY34ormYty>jAY8!3*>trRN1n`~QbezlpzLV3Wi*bL0`;;7qL_$LRdw!$PWKX)3 zD``y;pPxkelo^{{=tW+RB-1ec z%->F58HEEK)IYRBoJ zfMIO-+vfZc`DYW&F>6G`;K6U8{vkVINcy;4UyZX=pzlwF!&-b9l`~fbBJP|v%HGfx z$hvv56k9a!`$37X_wOpI-V+R$^}EhBUyv_zGEpOD4VNS-hpml{6%F&oA1NZ=S+y}Q z&;GTaVT%2)1q$dNk+sjJXvVhOHP#{wOfVqe-Ti$z507&>huD4WLt=k=zR+q40mZ9UWJf!RwF(b>?1M(cWh#NXz6<`wmO zeB~Cp{|_6KIzuaUb?lXo*Ty*#{zOb_Ioylo4c+UP{{TgQ&}I5V`j!z}DIjb7v8VsH zLnsb;sw=m~n|hz%wBwJB&J?NDW%}1(kc$8)>up4jHBY3KrhdM$_9w9eB3wurgF0?` ztcEyU%%vw|;^$vK6#^t&GWI!7OS=QS6!qw<1aS+N920wc?P`ef@w)sJyJxF)+)-Qd z(yCX)@t`khBZR<@inu4kn0C=VzN|5Y$TaZ#3HFFyw5JY5;)I5K*Q;0PSYAWvV|Z28 za({e0cwBr-{{qymaA8FS=#y0C2<(s8`AKw;#G8P(-5TfSByXC3 zFVK)X31-1EqApt9wQ-+#^go{rl!{HVv|-5OlNym_4MggOl4QryrT@<2&NT>RR8=S| z6LqbS9T_p#|C+w}RqqWR%kmLDwUy!EkU4=}v#6GNxRk)Bl@DxKNBc4bU0mZy%lt-{ zw}t@J@fBNCs1{sU{Qv^eJ>R1YKHv^9J+zcb=>R&dUk@7EgEw$phdK#1PO>9kQR zp`p((Fb!-UZG2GOv}A@1O{!upLj;lo`S^iDw+FJpB+G31Op(BUS(Y{!Gra2Fr@t|cetF~T_HQ)vuoQ7CQMg|jd>^c~Epfg7(@z4#;q&=lV2*10^e#}! zqc+=?mp|05=9d&3_;GidqC~bUa%gse$@HH3gUL;zEr~FqjnfOk$YI+Y$`9)=pGBDZ z+(rFJ4z;iK>dbqaDv=)Ib#g@K%20cy%_qviP*1H&)2hAft*%qlpscua5cB%2Y{eN+5W1SjkeB5 zFwxa~(Sb8g=5Ws81tjoRh+mo4Q)!X%3sAI{ zu!|0EIk;L^<1*co$wTvTKtTjgAN-I^SFod@i{YFLEKm+wf7PXVS+p+OMQ&})pt7@! 
zeN0N@G_)ZLG@7-~Df|=%kR_2KtHkn1R5FX@;H?z5}x-CC%Fj5wwO0x*Zu+8 zRAK_U)K}m%po~}nO6oVPgx|s;I!pztx)PPk&GE{*5w0Iuqxb*RqRjl0>HnAZdT`wv zrf?eGX#T8#2VRZw!FzQ>MFD$FG!$|&jc|&S9oGULrlkkUZeT(xsq(B^smD@EZ`AX< z%>=-#u>7nn9zY-Tcu}ed?fid;jBJ2ua!qaQZIU|Kh=GSLE(4Hr`>kO>tb_|@00r%N z(On;fnLY&WB7{~JsfOQ!FC4G2PIAoZNtIeA?-oBU(g641G(QtsU9 z_cZ%qqlfr~?aZ$3ezp%<qOK-`C>K1Z~bx2(E1zYnC4ZsZg;}LvakMxcZpo| zZZ3)mW=7pfUD~(98l&R$G}Vmb;G|v_m=iX58Z0 z(A=zMcD+}0v_*lco!b@#3$b!eCiSSLM5szW_!FH7`774OIqqxadE?sbVA*yCbbQK@p{p6%bt>Dk zzQp|f&knS(jPVS>GGwW&cJbZsR~<#R=WX2c>k4J7H0T&5a3Vf@+os2E z=JK2D!~gQ`VRq_J2h7V-$fnVkQhe|hX&)}3Po6TsSNl=hF!gCIeY?#n#i}VC{6`h+ zA1!Ui9RZYW+CN8>8*nl+Goib?%$&>FT|(E^xGDxb+4VzLYPD~_sL1{^-zP&SHqsqF zhXyMM_4EMRPK5+_@)OvKn}e0%4~@42R>%&a0I0w&$OlY z-=uwOr9vl_DaB$S*(vVb&4>I<3~ux&s*TTY5?EAZnJ{VOXm9Hl5ohmjFoh}C~{1fSo z820aaZMp$*D!N{52fQRV&zTYfkJoy{-ghC3MGu ztsZ{;4^1n;i*_Q&%@jfH@@^9Mq??UsuT=-x1zg;Q(fr!5M(-1|CeULLV zvpGDo=0&G^?}iJxovaTOp%mp|M(Zgjs_z}!X4-gzdEqM8Kn}_y)kyzoUpr!ex6N1- z9G&)qF7Dlw2*C7tJf^aj^)KdRca2uK#aO3(<0C>(adtQC!q^86f<*SoK7epft_Q)8 zcqYzRj5>Kn~ZS$G0cQ}0@DuOo&}_#ZA#WViS1 zpN^@}U$$oG}Zz>IxWVR6wLsT~QV3 z(y;Aw%URJ92P6jkC@UE&8$c<5PPcnveL{g7>dAko}#M_LXjtFp`gf~uUz6etOoCA?@6&V)SGs+2Q^nk4W z7qlU_+2JFgr({<>k7Ms(LBc3b6 zM2L84_OB}FY1Y;1JAHKq%J)gK2TaQRoGt{Bjt&vd2<~q@6iw!m7Lh%MF6>BBBYXqn zltnP0oBv!~u+mdMeEJpV)NeC@6X=SMzY0h5xbDEkOtpP`reORoI;O6Y^ztLq#C#AQ zsHXea*%lwOaE2tEx2P;dsUro-3#sSIP-RM1Ra}z=2Rfid%HR9AoALn~j>4)k1kQ|u zFZ7G0Q^Lc%U7sHSINeIgAPoc{z-fIQURkqmxH*cFZ23Rkr_vZv=Jf=0CFXQC+0IOA z{bbI-x^y!BK;nLj^0}W1jrQcq-^usT3_p)J#4k}V@miFknQI+>dK;$as2}gG`LW8DLiG2iF;d_vh{PCo=V)%-gh#uJC{%cCB=)@3OhA_2K!&|4zJUo_pHW9d*}QV=kFC^MdZ|$%W)Gs?OjhxVN44owy&* z{&|VT&6`%-0{()|#5C;@le9#Ka+3s#efAG?=7*0S`mPf|dL~(u@Knz3>H9ti`nL&Z z*i1B~J5H}#_kG7o*;z*AHk;Ohfk3f95*W=NDIkOtKQ4bHc@~%Gq#b( z-H5V!Oo;(g&?k4*I4g=@sB%`@h+(I0W)UOu9PCCRJn`WIazfA(S6^-Cy8;TMzaL=* z{FWnFs40}j+BnI5XdHnHcG5&rVU}}YS>ce5&jGeMKUCDbS(u}s`LvIb*vp*$#qo-V z)PyPM%whc#+iw6w;$`lrm#wXBaL;?2a)ZZ-e+(l^&-?aYI*dTC#6R~leN(or4yn2S<9dITnk0!K9{^!^UeY(EwWED3ztuzKFd^Ugq&W+XRb4E&HOc|{-N z${cqat}kEoo-~#a)QLhF$7fO7*X5e9l9ntHY9JJ{{)cIlxTWWU#j3nT@v()RvpL6v zUy#43YxRXo1-tk~bG)AHK*F1caWAEMOiI;OOLB2?EIOhzzX#mM&H5i@vuru#wY zGsmC*9*|xm5UgKQPn8P{lTI$nY&{bT+w^KNRcIT)R#%oH^G__kg`uA`T=i@xJ%+~I zY&B&(+l3aO%es^De9k6pHxi9&G+?`mK00w#f1z}@I59OH3`|U;AXT38F6MkBCnM{= zKkiKQ*cn@c-=6R8?RB*scl=)*TY$=7$l@=q4lW4J$ClAY-G%A$W(h1>$5FL9iz&fIn8vJO2dm?rqNJrTzN+hVWbcy`$R#Ga# zd$y=;AyT9!Ea+$vqjK;P7q{R#I{bzzp8yn8rp`XI?mT9u zjih_!GabF!W2<~xrfm-yuuhDs`)w|S1zpKNWb*f1T zvf_7y+;ZYRVK?5NI0n&)n;T0pnJX@{z~*#A0kjepWhA!9LEgIW=Hv(>KISshlYcBW zX9x-^8(=pRUSOgPc&+{#`jkjp-+4kELlqtTlnAK3+W<8JB2Ip6=40gnP62SQ4#|dR zP@>(O3v>AaURyyjM1*iH_LI3`HB_!4=NAZC)*Q8RUEkTsZnhuA`?pgw)58gwKEMGW z;5AY?hjmD=jZmnTvkXgTJ^M(KQ`F{t`)B`XDO0xm;4Jp!V=GBg85;2fMkS2MFYIRz z<#Ni(d^xo!k>JPQTuR@PAgtj4wIfXnPX!rOTmsb4TQ!jDPuJ|_Lt;8KW=DL4lzh!U zJZa8;x+EW`=_qF_e$Xi);j{wO63;3x-lX@B`{sk?HEQo(jUd#JUI^*AexHfPkG$Tv znUwopRw0mn-LMQGKft@w6aj31G~?Bez;P=NMQY!tv5#>H@#fy|72_(xlA5eFfW)gt zpl{js@;da}JHhc>8pnj-E9_CJE?;4`Gl)ROXr89y@Y-soC(imr-?;GtwO<8%rwt5hgI zy|kYuv_?`lg!jap$%jk9=vKt>c%MeV!sHf+F!bAOfA{9`0n zaBF3ct<)VE+!%S&gU+#*ul`@-?kgA%_^X@SUZnL~w-O6<*KLdMWzlQB9Xh|xavydN z9dSoN)ayHW({l4>z`tn~#rb0 zF{^LdG^z889Xac3MB51(e)!zhWt&SF66-8>oM2HfQMQ(ux&YJnD*eQL2sawYfk&%DGp_5_3mlGAAd(! 
zZ@TdJ|K$acz;}2Ky6fQjdo6R)K|eXIu$)ewXFUEv&{F9@&G(U2jJz%(J}WnuB8W24 za@?2yTtB_MCxtn>ggKfTHHBJcy(2_%mAHC5JD!#%W<9!W(uF5xdAIWSZ@87$yV-1Y zNxuTh+)K#~=O>$(ry!v321dO2;RwPA*8rwj^`<*U172pG9LRyVC&=wsIi+;%BWSMQ z=~Q$Bu{jgDNO1$vjko0pHcRXL27`kglR4Sc#1^yiS;mk~mfmh;7HD(`ZFkeOu2SsB zPM)HuxC~*)3JZ~yIt1Zj8Db){Ku*~ej-5Q)67ML4`3L})Lo`)_K`PtR;#hM({Z*>W zLi9(1Z+k0J_~UQo)j?89kUW?Q(!htJ%pB!4GGyLz!jmAta_~C?tQ6tNMt2zeNJe>0 zh{dYM29uHlJ++7lpD+!c?A@4@-#=S9C7YdeFJbMsA8qy1g7JKP0Z<~y*LybaOeJ@mk5NnvfLdAplC&wh!Gzt} zNC}b=4loqwc+XTpo$L-v8)ff5u(5o62Y#tm@E4!%Tjo}sdYqX&q*JQ|S7Qh%aa0+h z1V_}M6;MS->%|r2XiT8oI*7!cdK+vum4F`)wSg8NLv=gqpqKj5+@1;%F;b-4+CS#y z8zzw@e{w$?0z{?EYe%8u1uVYQt;PGxE6%KKHK1V+JlJPwF&^`HbHm8O#@P?y?TC)v z?g?GbwI{N@tTi!46AKSlq?g!m)sMEKH{!Gu$guc^Go62LksrnI%2u$4$MbU9(`!=6 z$kyi1+@`;k{^;>2Xkv%~)=apz6ussZiex^v;q6}*zjpxaoL9F0&Yke2iukqa03>;< zi$D35nYlSwu1>_r_7b0nv5TimLNV5D;Rd|$(?p+$ z6FIb-`!8^U8Gjlqc&C(v^WTfqtWP}T=C(JC!x*7`P3Sidp4ost_#Wr{T=={u*7?aT zX2Byo#qCc{CkORWVM`K57jX|#4cHzdbL?r7;>6%~g<-rgHOd9MJxH);Jh(LrV*B=$aHsVo5%DKZg3A+S6cm6-mSkR&HAFOLiejS z%EUA!R2fVVRo}CJ`7^i^k4^i$9MTvS{CDRav{T{avigl#E-C>UBEB9OMSkz@4S^bF z-1-5GKIO#YvTNHMHoPWC8;BaLKMwgf*f~a6I0QsRwH&Zc95=MNm}~?`T*T*koEn)T zK8TsZv7sL+eDC&3tNnGcKZq{H?RN}s-9>14M0QV_1HY%y1p!QcchhtSiE$DK+}7X< zMs0zl9=Dp7_UW)vp7^C5em5J`o}YS8lll&5`^wS=LhdGCY~(svYCEm$oTGVmXp0hb zuDCqkKZQK({yX?IupjYf?-vW)bTvoZ$R6*H7U-#`=1==)=>|L;O`RY0aRdr*k8^Zk zuRHXdc2~q=QlW3|l;zE`dT!hi);=q|UED%$?XkN1BE?%e;5X8!+Q>jv&Q+YLN`qgT zy!Ta|M;#RO;f~DxG)s&Q_MmB9c{<}$6u62i-4KmOC5Yk<2Qk|}1;TPRoMCBsjcIyT3;6bp>% z>wbIGP@*@|Y0m?XXgBht4DsW&4kFyc6Q*wMn`$lMDC7cVd&T#FCq;n#sC(Vd{N=hE z;51(E-37VPhLmPShI&pBPbSABAX%z<6hIl!mTWeDEqE-6if;P|R-)ZVQS=1xMp`IB zsk%)>Z^jwXZ}^N)I!i1b(tgk*lX35i^mNR=$U9~$?aaOfY!SAbLg=32f~5{*+EH%C z<2L5<6KS(KCMycV6l3-b^&hDyow}9p|HTJC^#hZPba$bqP5mKHZ?RrA_sU?cL!Uw) zE`__?57%CRalwB_LrfuXDkD5l=gTTeN9mWygucdt>{?K%E>bk!5%uU$_P=V`ZmUhn zZ#hUWSM{K}a_1x_gQUcPUa+7rs)@$=hKNw5sX<0-T%$atCbk6Uyz}X{&o}nibHZ#;_CCyBplbra$)zFKvtX1B$d5u-zmPi? zNj`|)O)M;0ED9yNh85{#?`5;JF|YjfQ(hUYD+jc^kakLKRBdrkbfit4K0y00ceS0* zY+uggGUId@(CQi^^(5D;T$9Kd_XH%TRsI2u#jiAppKp|4JPoi|vUH2Fu`Fjx@a6#u zN&Gjst-r4*xx9P9bCX$-J2qW=`VDsKmRziRW$&OEBPBf1U1e#*WzYevo;50___w;5 z%TS~g6&3duRY5`B!z`QTuE;Ney)C-k%pUBVI-!rn*aT2}>_R<|Z6-v*PGxNhTPC&eHwT|h?w($9Y&VW@r$k@uw8ANdRZW@CMNsk`GE2t(VpH_uce*)zvuY*7qwt*YR4=1`4}My+AN-I$R>If(VT-kLnWQ)V7R0{!4JYar zhT0l8sPBS^J)nENC<-3Vp=lY^XGn=9C3wq*GAlS0-HDHkO)hVv#PpX^2r$$WAT?Z- zvR$+rslqT5E_3GcO0yJTF~d#rwo{3C&@ zQO+%gB$mmG?mYz#3mb2y<9+__X1}c^q&a9zaCw&Y^EJdA5<_j4=fYAy#;!)wC9`K? z^N3y-S_<7$iU;j2qph7pz!DnhruKf@~=k^Pe)vve(n401C6m8nn_ol zmmF@hi+C1$!z~1n3PjR=FAY95?8Yxn%&kC49osaCW4m`Oq!{wKnv}_n5#js3YT9mE ztgx!}-=@b!!+n~weSN|aN{Vs8Pg+wh5cmoXBHTNpWw-L$O`v_0#;lL<2pExlvDWTE z3(Fs{LL2F8e^{ub&O1ne?HW_uv=hV@v%^Z~1082DA9-T772;OsEKzT40Axcrd{YET z+aeH|;@r4jL{g3k&Jx)1N~Znb{EP6(TmED3h0|?f{Ql*qTjTfS#~7OPM{>Q#>(n7u ztq-NA-cS6BJ6iLrn*+|3hnImWnn1H=e1J!0U~(lh7W_b+aQL%$gPEJVHO#}pND1E) z#syWQc>u;T=g$+$@gal4eqrUTIf9RK@{593!#|En9;{P`nZz7OweUt>bE;U-=Ji}? zVM$z?T)2COl-`-=xKYQw-rgX~eR#SgM@EffIO(Lcgu~XjMdoh-iL|0a8 z(qG)d9bqEr`iuZ0KM+ z-)*EyGyi#x6h?-sl@z|z06n#C%tF$jYK&%|c*%De%vX&#BUKnJ$SvAhTQvZ~2*vi{ z&^pfN$e{$`T71<4$`P{e_%%(H5@@40<~KY~0%f;W*Tz1NVMoG6iSJt&4(l?c4!|n}r~Hdm9bUk~MU{wX;@F zS4YH|AJ$%&Ad$|6(xgIaNhO5hw9oi~h8W0qSjDAvL(!?8mTtpWYg`2npsq_QEo@9Rx}Sq zWkGXi3N;JcCcRy;|6O$7rNR7&R&YKZFN)JikV@V?A5m41m+97buVxzBN3(lLeyRZ6 zuJmPo#LA6_l$b+9xz!^!j^*zuD4&;PrpQyQlbR2?h|!t1raXZ;W7~Z0OTyGSg~{K? 
zLlo^#7QZ{~m!B6lBrl4LnX%O{Mi&ozEPLx~LpzP_i)zmc0W_+z(<#lrMf$e^(TT_O ztq~{#&kEF;_!c(Le3Yd5wpE)kL=63g&US4y1?dJDgA-^7UB&OSF!Ipe3z7}F)umG= z$$9+}bh|1(YYEwASl8Q!<_~3@=mDt7{l8r^DtTmuqvOfRM^&^NK7tf3zKV9wHmP}{ zUUuWN(u`V?Otp zw3Oxc7Zx09(<&z{OVhysdun zq_rEzm`}Hm$ALLM;T{m6`ZjNHC@#!iS5C2uFh!?pN%E|PK_aJ!j!Hh zl2ZhIA`idQb@qYn#39lpNb&CWF{pM#Hs);POg33n6 z{V9(OoSoAZcTNeV-*idFcP#eSuzE~tJMw*D896zV|voVy0sM_=( zyW<}SK?*v#)&{75Q-k4+96~&#IeenPU_3^fd8BXIBuhVq7Em(*jQ=mLu2z6n+)DeB=g*(PPdbQ09`*HUsnG=1*56JR782cV`w@Nr*{>zc!(0~^ zg3Rx|{qJ<}?$l5n{lUXut+2FM%;6I;6YBSR1I*3LUbwVh52t_gqCk= z3{887{8-A;opGF~0nIO^hHo;UjqI5Lce9rX{4!aweu~BJp$j&Ll&xu%bV&`@_WkMo&05r;ujL}xM=I@e9P#bvBcL8fX|@|0Z2+r zB2anq={4HF#KyZ8r0Pz(Pu8vH1Ajz}3UQv|>rx4-Y)-UXAc^OhPF{f{6GSozUVgM) z79qvA;=*IRQAv`oY68BPU%9n16A0UP4PnwA#JQk+5YD6?X2E-V^QGvzPG}va^-z&M zD928!t_@2h=Kif`OU6a1$O-j<8?{QpC@yyH>2jvYD~qU5q!m5F6R9P_Za#8f*J_Sl z5Gth#Z~og;cI#%YjbU=TT^YWYT$-Ku(Vmnxl_Mlw4=_ifQTS1im?CoEQO0ILWEJy4 zP-)QYXC-U0d1WGh!%xiI1(M2ZkFr0b!c{0Qa|8-Py?Dnh22VS-r9V{(?^2JGqQ{iW z6?L}9H*_#%eQT_N=!LA}oGe)bAZ7<8fFbIgeQCv;cY~C)mx3QDbSn&~8BQ?4XD>5? zQ^l7?Jt73$aI-D_y&Ua{QV%}onErP4E+^L-R@;O@$R($R#bI#RH$JW2a`y`E3MIey zY*X+b6+kwP(9mtI=S1c(EIFDe^>K%u0bqm&yT3bst z9Pu_?P5Znls73iakDanvaBNP7vShnXu-~;Nr!+A;>l_!|*m_7ekg)(xPft%;j{`A` z@$PKbIqLcY|7JEDdgHbZaQAaCQJ?bJ5i`8C8yVp;{}70pdd{LU%5Vd~YNz}*_3YHf z^@Y+rPciXzRU)KvsW(~Cg+qULrf%&&{U2m~1yt1C_O_IOBAo(KA|Ob2hk%H*N|%&K zcb7=FNSBC!N(<63BaL)7LxrXNpq)%{9I|<0d$qV%_@EPOcXg@AANmZm4u&&$G$$*c-(;BN)}!b z(A~km;{QdhlK|5t!`5J&OwX-z;ze#Q&Pj^0e8-@OnLYyscdU{Aub*1_pNZ@q3Ak{! zePZBO2p*;X#op)D>i-U15^?Eo_O?a9c>IeNNN5NtApbZrkSkN$=7AXGgSN%(< zR6Nedg=iTpq=)58rJT}vZIL_J_t z#Bfze3I{76Y;kT+-_2L#yP)XMCKk2SN#xVKka;hZ&cfL5Fn8(Ev0Sk7=F>r z@e=1NGUdux33J7_PfO>$N`LFM$A!HArgr!ZvziR8F8fPhfc}dJO9fn895ic;Uk--j zvKj{M2dTy;((cZ;DEf0c^M5i_cP6H)4hi(m-0(Cj_h~J!L*jk^j`5xy#b;-^tm>yV zI&ww)%!lIVlCWUO3x(3hv z{QNwBe=USh+mz*2Rwh>F)r0h~;B7mz>dh{r2GMsm6J-m(pGy@a=8?iL7V?y$I05iP z$l4k8GJarKiKp)>+KZZT_kpyn$vnVpC0dZx-8915+DA6DUWq+xT{xf;k*+S*Y0zj)^@rS2<@7;!; zqXp!TFEkz62_%SfQnz(CWt1Kfi9(}3`UNiS+l*wQ3+u+$qIueI&tHR`T`o3(8vN zKmNRR8B|S=?}DO=+$$t!>&W@!b7^}Qo7EFG;2YwHFH9r2yd4mNj$)nLrDo0Ev%~+G zz1i*5vB7vgrt|*O2_wGX#b6&&p-$y3`WctVN0Sd;znr;)MmRYyRQdF!J%VA|be}io z`h)P^pUOurS#5p`@OuzrBc2?-BL9&K?@gTwHa7i{;Q8RMpSPC6jTt$JK&;>6)uETY zxzwBTN^&i&U)!|_((%1bC)6_X(1UM)c7>zGCyj`{|0w0V{H;S}1r_7Y#w6BeNIx3x z@`_j=e~Ui_HuYmd)4RI(obfVP7^F*uwK*$a8MYO)na+4GZ^oLtRzwfOm7HeoV`D@+ zzYHc?Gfm$`^3g=o$2i(Fo$7wlx79{>jseJ>dkJk{3h@^XbY}?-FcCeL2114SL$oa+ zF0iZnxiF7pZ1*Nr|Jn!B3~`&A+b6x+s8g8D^*ROr+b6PnrsnjT%YvG#* z^aJO^74&l&Yi?=%()s9b&h+H0=$)p^4EtM+w6fC%_K5Q^uzw4S8Am%spMRu?Io7DE zuA-;GmoxYxUtFCfcK2Id^N($iiHZxOZKgge9I16$zA1@9x{OE`14WmX%ee**CgKQg z!7ZJXt!$btFmenP+iMP{FqQ~f9-w6})Hl7OwCTx7daIV-kt$f5%T!o0@N5=S)(y>5 z*i8AX`EbE4*M~J|=ZQ($z&o%0^9KHeFo)L7*_YLz~P+;4@=N-mi+C-&vquzeA*XHOXnSH$dQ==49VGYpcMbe8;F8tx_W5OE6 zZd6uRWF39Ub>own(K$Q?4ESx0mbyJ99^)+M-~NJgpPC%vhC3xvW|)(>JQ;|6wQ`7p z_U0!F@iE%@>8@t87I6&U6nbU5YjgRi><7M3=KPJ8tgu1nl^2M+^@~Rz?tJPEJ%?4Z zm@I9)SN7^4S*9l1OZmdHtk#Q{~UqF9M>~5!q3{q&MBm+C%oyyNt0!S=i0vhSY}UIuL!@`5X# zE{SO9@0&Zjsf8VulxMZucD_F;mVJj>GT=hL;K&eY|HZZCFdp68j@fhl8d zOjJ0-wi0hvzZuDO5fR*kGcB(Uz94Dhr;N0H%*pG?sCwF?V5_!IbZBm6Br&Uid1z-! 
zk@C|o@|f+_)rz;2pFll)%hr+mGtauP@|qyL$#BwfsUodZ76J+U^l zn~W`7nt&vkc|Yz^!>>K1n@r(^kp4aTC_Vgmlfba0FB~Gjh);xL%B7OJGtzFXxL6-O zW?Q0YW-ef}P;sO)82Gqxd>K!lhVp?%rpv#G0C@a4PZMz;@RqN{meXV zf8huUI|9JLmf@;{S^pSB3 z(>zC*(UX?!oG=#=Tp=@@1V=(c*Yw?;ozU=?)W5eV(Lag}roSk4tN+v1cE5{eP4P5C zU6gIL$zbbq@L0Nqc~hyMm=ZL{Dq~ev%IbT)GZ9Hzk&a%r4EOJ_&OIJH6=~1HELOej z-}j+gGTE2%m6-xlO*CzlYK`H34fqU{w+2IUWXB%a=Jotg0>Um{dT+iaK`n{OOPvlL zUiK18k!W9qw97}wMlUTvnWLj)U)kH^E7c!R&@hnh_?H@eu(dCSyU9m$+wxMr=i#rj zJV?+yuXh7S34k$Bh>w9n++o+wg+wR4KJ@Ehz)$303al&HH)mj7rBS*$Uid5XujJF3 zWM?{!T0c5kgt1I{K0&q#<_~v8T}L+?Ilr1axK9Rprrim$lGWvgByUkgZVyN2SebGa zm<;r{{K!=JU`Y_85;NIK>Fwb`ahLio;VowcMj+<3!)5Mc)wHCP5%P=@BtVs~_hy&D z=k@WOf|O^RpT2(!3>X=5I7eevpnvO06n+@uVL`9vR9m~Q%q>fv;j5O=V9Qhy((+B( z@U9K-4e7RxgNIQ#6BYqe+9%}uO~IvbXFM-1*s3tMH2IX=%^TrQ3{ObVnv<$qS0b|H zXBGR~4RXzIVx?XpYI-zOE4vz>7|G9aqI&&%i&hj5-WU+n#x$-!Sfdm`_IKDd(mW^p zykIE0lCNcKu(eEsQ{H>Wy4mXJe0KChE2dSdS$7>C2VtR~?uv`kx!Q2<^LK~#$uDA= zHJT*?E!}-~E43}p+Yn1|E?HkwQ{nHnSW|JfA-Nw~`(*G|56UZ`jY{<`Pk))k zw)?5*iqqU@$kZN5{z<6mPxb8QQhM6W72*2ll4xftUjiWqTN=tAF{?2^=Gb*<&~_I% zB7A{QUh3%jiqfT6OAjB6ydCK8XS^53q$D&>Gbb-{kM!%O;8>(!YDFoH{4W-?>csq= zhUv+%(6#XwMp*ZBccsP|_Y|iV{rp~cW%{^?qwe0N9-5tfOJ1KXRKu^Ikv@^JJ}V?S zCiMa8L0HbI;rwggyFGW`74Ko1m50@%oMk)tT03@roR3t26Tz?fBkSu$`O8OS4Bn1s z7r~q`FLka>?IY%HBG~5)&yn)DmxBEQ8jv$qmhplWZBw~h11jYradihRARL1Z>{^as zvwJc0IjaC{j`&p8`2u}RS$dj}w;2FNWlMuO1&am+kC#Psr?~VD7NaG4wgZyy znJ~>8w{E|DV%`^a4(h(YNG5AUtexULmts@-UZ0mMboXSdwDBSb%2O)&o?*l8hU%B=^MBJgUB;L^c|gnQq$V~8!i zk|)-FncdHpN)dbZ2bSkHJKVbo2NV54C}&5Yvr+9F|IZS*e7_lJ6nZk-2X6y#h5C7m zYWLgOVhfN7s(u}6NRfNDHyR8x(78}h@7IEIG%6;^Z?8gnYYvipxf-Yx#SRU1KS0Js zq?=PRGQE=g5NeP2#~H1)jEp6-eYw`}89TtrUY=AnFPsiHCO%(fsw~}QvDQ5^yr^re zA!oN^t9)UfD5&+mv0FIw05*PaRa;4$oGDXCgac)l)cx5fOuYN0O`NX3v1BMsnZeS5 z-~j|_chG|>zFc`B$+z`pZKcVV6NO7qbno-C0-9Oqw2_h9lnCaE2k7i(Y31kt z&ZvZT?@OR&3m;OXTRd3psr5d7c(OAS#-tcKYcNqzvOc4C_x#Eqx2x_!Q7^~J$HT;w zSAGy!7`@otMk6{aZ&tN+8#}9}d581*tm{%o8h17W8cPBPkkfZzxYZ`qSQGooCkL%w zo|(9odBONy18EzdD;SIOpakuuw&_vvYxvO!v$!_x_~E8yrO0PZw)^jKZ=Gv(+-Iu! zt;4a;XQS^m4wAXQO~{>IJ)5>*yWDs|LfNwWk<-WY*`k$sW{h#r@`azwIOIVfFyHYf z%UL8WsteE{49f=|M9%uB!fc5#720VU6Pruh^`xizrxH^NDZl9H-7Nc_Z1M9ki$zOI zNhkQzQ+~vaogjT&t>5rcVRJWISwsyy86FDfJ5ARe!}(~}sEhk1hUV9g3-qBcpY_AJ zH{40>%Qnl{1#Io8;s;5%kKj^XkM`Ja{DS?OhfSZJ9BH2l_&C-4K=+?Ids#nD)YF20 zHalI2?EB55eaZvdhd6w&=&R`MFG6)^GkO}&sPf+-I=d1(NhmY-u`xc=C#NbmSv@1i z_OEzg{N2zCrXS3V_%XaeQ^Y0aKf`{M#SAFTORV*o2Oq z)v8pMPJIKvNcUd8?IHlVOt*hLVZ6N59wqF3#G38>n@#-kXk_h&Y-HZl>>6e-pOn=? 
z<CuN_W+l#L@8!<}hBI@|ekHLZj>`gK z5?R#J?Lv;8j9U*hmiYJK|nQcZTFC<6u4{)AH=((|l3KL5iQ!AG> z63-dOt{Nnt%z)vjdGD=9zKF#zVZU?!mBs)7+V(9ZwT2NeQR2-Tzpa1qf><3b;Kj>} z4$|p*O5iWd5O2O+$LAnL2KtHD0EE0HGY6=IXi8p z3BDJ8C2>vHc)E~#ZRNQ`o3YmqvI5|j)SwMXf$O>kF_dv;ITr#Bx>;0MfQG!qU!C+n zK-?GK>pMn@juHc5a&X7SSvC=W{nDSL=K5!Z4_FeFzz{uHbkoBJNIn54l{r)+v}Eut zukRx`AkQcq+W^Syb>-rxc<%*}@A|IyLmj*dV8I}sGLU_3S4W=-gnHm28X6j|mG3i2 z%-@Lrg^7S4v%McQgN5Y9+Plrw7^rRJ)m}{PWvin9x7Pr+M05RTX!^ZLnai^Q@eGp9 zF92j!{;q5Oyi9c}%#}01B($MLtX3K}xX%Hk%-#JPMu5fh!nFg*>ywd`*|hXv28dt7 z(hwKjJD~nS_b%DK==!fr8eIPagL!H<{;!=sdUgWJE>&UdK{5Bs8yoSQG`m)vyDTfg z7(g%%!mJ_{w{g<8I!3&)pycmYaD#CHHMi#Q>!r4)#(dp*?QRD zHx4*ICY6}TnojM{8151&)6;q@hgg|=QgJ=noUa&t$dvP`F!TI9k@r%UOAdJ^_)zJ| z*IpiwPzm+wubZ9!+8%hpRM*GA_gh1G-{hK!XVU1pB}M{FEURv#kU&QI?d)}9;+3a4 zruNz`$|soWjvWgGLQqQ&Ucx3MA=}oURxTPd{0rvFdFFscJiyVkGv&hU$eUvPw>P(B zpnJjLPCs(s8f*^8E6uoq+i6M6dlqacL&=lhimOsb0nFt=m_Wp0FE>ysQkp)ngb=O| z!S(tv??8{PqW9OuRi|EGczclXW8_~6{T~R7TFsQwt*Sd|Ql7xc8L+hLcU-bCYI{iswIY z5&5$pG?KAC6xSRduYHA*$W#^sZQ$Qq^=w=(EfC2$6wW1pRG6&YDrW($heXcQi6bmG zJ+ph()LBV;>w94nzYe~A%#jk^Iz3g#9?ALu^&^fyv`n?-~{fZ&p{Y2g+d-G zG;%Te*GX-B`qyr4@zA^;BnHh97sj7D!;*q#4>)SG2ZE^|Eu=ImH|9%w_(uGgMA&Zb zdn2NCIim&>?aci?$OGP(tlDV_KRA%Gs=Fh|KaIpcXM1fRL5eKk1Qo5mumO%h1Vwk9 z6gPKZ&Aj(*I)4O%YK|n3bH#7GI^Pm@TEfJqlZ-au*6|7%p>eo-?ON{s&I+JcHqik* zJ?FmUV8kkSb2#R`M;{@F1LTiP1qkDf-zM~q+7^db-qk5pq*}dO0@Qlw>&~n{h(*d& z_-Bultb>uix`Oy_OO^HSuBkl_v&;mLO<$ke9d3QO8^^l7%4_v9k}IE^k3x~3xMg=9 z|8brv)2EupiE$5|YpG`r9L%tOi0{caa$#V%B;YU_$ob;9*UdH(jZyB0<0uu2&plGP z{G0XsFbp`d0I-R2@o+$@bE?wHX?v1K2i?CX$hhn>Nh>E^>#vLWLW92M74H;IbLS># zZA9NNx~bkUXU~_acT=ZqLc$XaW4~8a0!t&A(w@9B^+&@GBQcenhzy6VIv;qw?~vFc z{H+a?^)q4VBmbW4KhFEFM>6Os)3wbO@$~I^6UI}f0g=~WbGJg!+y7mPa(mQ-*=SKh zjt0SnuaB@=j(q9kgeDulQ=tiEB}}wn!m`=Yb4pJo)=@;a_}T1gTSBjGpzw7Z*XIka zpu;=Hr?;ZKVg@a2aBzIU_1w7#6M0jYYwSL2tdrV_L~=NAFh?GyOaAxILxdWM=(f-e2be^ikre(7^JLo7mXdUUZ08C@Qj)x`EqjPlR1&Oj)Z-WVqR~(mNWFjW02f>>SbH~U8#He zBt$%MegI_27rCye8Oq-Wbju_eWOkf%iKq;W(tB=pm&!ATS5f78HP6vdv86_jGXK|q z)G`74h7m>i*S6w*aFh5Ac2yL40mU>RJEmwLfn6@zStJXEJjCpJKpr1E08L4g4lha! 
z@?|mf-sp6AXE&yGp{L1Q{xqNLvhDNH|)RNkTW_6PM3QD4m5~IXpm=$ z=U`#-T_Ols-sC~_o16c*jcsmp-F_TlDJh4x3&{Cc8MmZltc1_WLYh_fPl5o1oU5A~ zm5{&JmG`Jc9wrj(CQ0l!abk#7lRC$PplVRi?Oqxr8lAWM@xsC);B#;C%U(%yL(#VE z@rl<>oWLIT=M6|5Z)16IIewfN(vuJ$4Y*LND$9zVD3e2MdF6B$tfBD_h}|S& zH%6{0b)e49>E3M)7FBT#IZAcrD3eRZkrT5^#ETAHUm34ris}E#ddh!d+Ec)#_S;B2Po}K)L1&2S z&y_9MzCQTph`z;2KS=!9+l%*SvEV@pq^+w9=luMfo^KI}=#=UJ3@ujo(+_53aZjvV zFp*IR_yFDF1nP(yz*`xf%B(c9hK8g=^^{hm;U7IbxK3ziqn_}~20`dlQ$}oAf=TWB zB3~=jpMCb#pSVty_8;q1HdO7~@<&#iSg>)-6;X9% zp8errVoAgbm#~i)j#f7QeS_oe(ND@rG3Zw_KIPSJ5$>vMpY1`uB(bHnOYuE2ILq}d zS35;*ee$M$=x2{p|0j|zuHYCJdU=I{LHGOLAxTt#CxH?121i3d=4{DZb8%JOPz zD7cytjQ-~%Ij>H4=lB`nrI0|>4-#SjB^@vN>indDC(mDsRs;7 zE!dNxRrd-GdvW(|)@-jU>#8b27xx<0x1@+LS+v?&`%=|(_HaH(Ol4fM6-(0NaY?$O zKoqqOfCs6OQObrxB#Ux+J>S@XmlIs{IIWO-t(m*AHjL=ljlFXb`g7bGIIA zrm*wBjlYU=<&AmePi2qazwu^IM$7N;rMXhv*MGC3KT*UTGf4C6yT^<{hwB5&>ryBY zpbH~Kg&{|^f~w59#N%o@A{G@BJL72j<}pc>KQ8-R49%-K=_fmo!}Z)MsfnIy-yA6% z8X5|kFb<&QwK-aB4YMAnDt3RachzuRiWx3!XcH(vZ+UBH1qQq!g~4W`QHQUwv5-8X zjesxpv!7`S|XGOLye+z-xC>oB&ou|UWPnbye8a8&Efw9Xx; zg@7Fq@v~W;ywa;upGMcr`GHsM=Dz@s7Yn&J2Sy}5`8Kr1o`;H7RS8h%21!v$TWMo^ zU(`o4L28eq*dYT0GRv6)L#?_beb3x%!*paVKuB$RY3?pXv>5d~z>YROIWv4wx9mXe z@sX*TM2r7PE8-JnRzv81`xa|bq-#rIIPAPep_hv#8t3Cnb_43a{UB((v`IA7X3Mh@t=l0WZT;yhwxH*XA)5Xn=>9cCa3KJv zR2Ip_{XsYWJ+ok(@IYgJg3C)FI0BP7{w7~{ZA}1QoTJ*TIeh@UlMmnUqusf(V?zI#piA&SP|E+Ial0%wT%v{Kzb`$r)qB^;$b%e4=_Z2Gl9d z_jW%7BbFF}zG{5W1C584gTs`vR{FHldcQY`=W!Z9^Or}kUao@aP*UvEo-57eGu-E7 zXhquDWmC5J`=02xqi}osxl9?d6~%RBwOqvTX(d~#^+$#Gd)zetp4b41F5_&#>6}N5 zCn}1o8JfVP2p`o_qas384oKQ$D<4f8i5!0wdGV)?4El9V;|(x2hN)otdqIfMuPBTp zoYc@zmzp00-xi-bFMxUH!VwFD)6?yl5R9)|~gB+GZ-f;;gU4o;7tH-Dzn~&U2m4>4Z*FknT1Nk#0y2Qil$ZK*u)yeI<}= z&REMdCd1!jt**X&LNzliaAEEDl5l~I%1A-;m$#!XUeYIc0~)KES)F?6GN9N1w21x9 zXTC0#!fOHV&> zT}cgm`%n)^qbFaexBSz3Ruj6-x%ni5y7!6PiLnMp#@-c#vckd$Ke}WL>YHto<)^a4!owhl%8sKv&$roB`1eviXRP^W3#nB3|%ZI()iSD;TP63bFi7`WB%{Q_xIe+36CBV!X0nC6MFRyu~Y#0 zU?iRS_RLi>gd=OuhnnD(+K5IQ`kipZt6KhjFx<1wok-c~`{pK^Hld_;>MoK8F^9Ht zJG#jUb~egSJcY$a3GJwlH}ewa%Msq6v7vrzMm4lmH(D#ujXCD z5b@^9bkNs)jZEsZQQ>pN#+cVp?s-L~HBKy+J4n2m))~Yn}Emt#|Si1i0Sb>4~W+Ys+f= zII}l*pPGnW$1L|RA-@KKL8c7BuTjA*{#bJh0fLa7>eIfi0+RSGkfjOHUii_f|5m_4 zo`$^95+tVef6Moxx37R;|oLLJ)Jw4u{*&ZP52s!}EiHSK2`z&z_(4nSJ!$OSyiOINWAKI@3GiX)ud-3I2 zmrlH5Ca)T%F&K-J_mY>CDe0&?FZO)FSim8~6mi})_Z?BPXX~DkCZd6fd}PPCP^KuS zg)uEQJvcuVS${4ndf54Ta3MHm72>(bSZYA~5yP4ww(B1)0F=P}WH#ZrFBlN%lH2^B z1jd6?%ZN-JqSv2#!8>K0r^62^G%IzU^qB^1nF&u`-DjMAuo7kB_?<3Ti-KEJDEw<1 z>s@ra*ErP-YYw0J{4uda4_RE4DfMYuj)o~U&+gmOtPDMs@o$5f^rQKdBx2wo9fvev z_8yE0y3m+9W%1oqrhi)fp3#epMYjuXUa9c1P_<92Oa4|k4V%9Lst&0R>xAJ0 zELP=@_aPf26a5e1C5cwp0qU(T5-FS}Yv+t_hB>$QMBm01*dyLb9#SXitiO~OTg5f? 
z@@fx1<@?I9^_AQ{jGETY@JrNqhF%3Ga(oNF^x>oQf0Kwm$f~C~`nXF?uWJ&}a5_7% zK=we8wU5$17bDe4+@L@_;omI&$WT6XCPcYH$oBd{9 zFgefw17zM6(R6}tU#S85&e3!{Gcbyg1$C+8C@6fuBPtp_-a(JQYp1B}5>4BrQERAL z??2_>rcef36kujnAH!Tht8ZJ-c#xsUo7I76Ll3oo)~d4J)G^MsMU-u)J>~ z){B2I>m*tFl%BMyXpV{;91IIVGTk|Qk9<j z&9!@bQDk>@r9jQuHoGohd{;0nE z@s~D_4^;YM5c4ByCIj$gNUJM^>Wl%_lWS>kQHJhSdCju;-c@}-hK+% z?c;NCxbt9O=W&=OvT=K5`H>_WTX+#L_Xc(Nd|_t#h#PJb?+emK;T zzlqN{pX}{4NlV(_FsKEgxDbo|X=GMt=3lUFWNen#m4QOgNL6#cTbyFwu#t?ue56LX z?++zu860Ic$fslKrC}`@xamvo!q#MnFH@t}Ac)N>pZ_hWO660~NyPOL~t+4yux>w`j zR|HkGty)cGuhL5|Q&^f?R+ezEZ%1bwj5(J}3vea}2K=5DCs)v9<(Lf2oNLoxsdNQMC5+BLe56oK&N4b#_n}&*a1iJRQDc1D(ZAI< z_3|1Pe>nlg7j4EY0!?GE{Lf?1x}nu;>%H zL;-<S6qt8vT;rX0b15-8d~k(dw5jidA+I-kZbB292IXC_n>`gdx|29w;zc zD_MOJKI>-O+pFr5-q!IW_49FGR17%O_B@zkA&QZY{$g}eH{$>P1bT+ncsbr6d^}}s zAnZZ3-<~hVj8f0(cCm1=;mZ-2f%hJ&Ee{T1q^>`UKFkbp^0y#Oo0dn!1i}paVICiL za`Bt{JRXKMzg%#xU2ws2{J6v8Tei-;viU}emX42)Yg8S%`2FQl3 zlR*usCoGpV9cyI_YQSku+#mo;S%;-~1F&)oeXu33{2EWONw<8WxdUFWp>wMFw+G!| zV?s*Kx&u+&D|RpqLPO6To>0ebP?nB{`2wiW#?-B7Vm-s}H(gmGy$PXgJ%YDXi?f$( zB^}p4;`^HRXCCE0_DA*vYFSB@M;n2S+<{#^GV)x+;;S{b90tv^ZBle%hlkD!k-(Uq z#@MTf03RnX>#?Fyas$X=f~d@-=J;sTj|fC%Q85>*$% zu@pi4J7aoqq=5qMW^7x&hJVd}HCO(D6FxwNaJ#mJl$H7S5F1QL01ok^`$u@`FnW0< zzdyc?$NO;s(Po{orm$u|z_@@t+o*=L<*JE_2VBg+(!uFlBrTHxDG9BF+Fx1ZDKjDC zC*<2=RP%v-}@!jXWLY`o|E=q-z4P>`2$s$#|rADom_~(bm-=4(2 z+%Cij@K9s1B6vE&CK*xXa#P;pRx_B9DeEUz2Rl^bM#B6`6rGAMuzta9*Z?a)`T`H@ z7)x}%F#b_~4C~d+tUV7QntTjYVmKBx;fbQ-$$8iYokU2oI+tFH#OnxmdgAb<^CPln zT(KhpQaBJXOsTo^+cY$TQqH%cTqj_2u7oO!_@l%70h|J{Ws38Pj5XnFCUkmCO|J_R zQLik?txB5+YFfkcXjwc}CtkhUSMcKgIqD?}FN_Hc2s0g2sfX!E)Lj8GY>4v1j$)zU`8lPNS&RM1Xx#mHQ(f zKjsb(UkJ^Mt1Y)DDvF>hw8v=TdM41ddhjHFI5pF#rHeA_O!m%e+Z|bcy|5?uUx_+F z*l{ln{_iViRxiQ4-^{>?1nqEU|I01oYlcRfI7f;vyQJsFAVVYapr@d+xv;QPcyBId z3J&KE4Go1~Ltd6iL1V*{t^--bXhRn;5(U|EZFbF79B-C%c zqi+Z3`>_9}x=?)cXswHkLo`U6U*l1JY}pJpl~K_TaW+M z9#igbeno8GYpCEb$FObVO?>R@ABVLC-v{!AZ78A>q00V@*!{x?2yrxnY~iqqx-HiQ z0?jr$Sq^oFn@I+gm&3w`r-HlkhUHV4HD#w(j3taY-|ionQcN8oyv4Y}jqK*wM)_X5|J3skKzqhpP)b%3dz zKRzqiTZ6f34uFnUTtrCLpSgngujNa)^^RFc9IG-sv+0qb4-G-I+R1?^|EFJ1mQUq2 zWvUZwqOYsq$#Z*lq>G5Vc)?~>vd)5N*hw;LEX{_3plDCr)Z11wB>rW334Vj`V6Qq8#Fa2Kx5L8N&`o1qe*GElw1a;E7Uey-b0s7p60A(Yxy(i9 z`xbS6ii(J+hzR>8Ma4n5G(9^VUO=8)!x_msQ9m~Enjf9OOv)@Tk22?OyR8%D*K{mQ zMld1j^UJIFduse>j>_zS9e(|~$EQM5I`mTR0TLmaANYrLZk&eMNMe9<O_>A1FPy_-89A&&*D+$#Fpvs-CK)r= z7dA*~Ye)ts0E!oMI!^|2AARq{K@ikPDRo}aG~YdI!$F)1(&eoy;F=vyL^oUwZp+_7 zR0?4sJL`>QBQ`rlg7iZ9V$$U~!3bWJdT*IlvsB&@$%)wIV@XQzPDSn!{ z%8}70`;urMbht0y&jZzRmQ3W){cQg$;n;gpwrv;AsTsuj%?S`2w!7r(K4692p>v?% zY03b{eqa)pSCXrRVI-HSuPe!1>#+yWYYN$|d ztW789V?{kJDjfK+0{=MXnCAI{5XSBMQqGM%7%5I}8@wGO+C*F`Oq9n(fk3LQfDn6l z;`rw}o+dZfCm(`YRRZ)~?zh?8=ZaE=BUhcT@v3r}N25BPfF*l(ge8vd_1_7I-95+H zD4hulRlohVnJ~0~%hcKbT?-)+&F^>Eg*e!1`K;s0=Bd1xmPbs zL9`s|$7a;n7L*IZr3?-5BlJvH;FO4k`E_ccIPfb-$rp`KVZ&rm=Pwg%!aot1BKA!+TkQ*RU>K)^gtG_$Lo|dSdNr^4^_?)6&`< zZ|uGanOH6uNz7i4Yk7J40-Zov+FtQ?lyYizwj3{pd6a5Bv-_t?o0K{S3ETSmoc}Gm zLCRoKNWw}UWmzrbMYeynvu{T<7!}pr__VXBtzfr0JQz~Ew8m9Q`7~PN>Z+CQD)3aw zlWw5t054YJaIy~{^15-Dk@a=JxE&02pe2U;pxe10)W0in>BMUQWj} zSP&WA+P4Ul)>pjUeop(^)?QjpPNF4&dxB8bpcN?eNYmRdS2|?%uYY#IJ$C$KFGuK_%m%9o$$K390~7!z(QF3l|OG zOGnRoU9$DqrRokvXEQV@;*j)Qk?>?RwT3M6&y5KkkLS4bM65t}0k+6&5(M!oLq+)R|B&pAbEp3J1*& zQQcbz3rA?sxS6N-1VJi3cJSbFfamTl``$b0u*Y^PJB9;=S?3mamd`~*;R8l-tr|y* zh+LA%xhA@wj)Qpq3H6Xm?+RUx6%U9kA2yZeeF#d^J}p>5{I$cDv|O=M&%qDf)rsYQ z+}~f>T~Hq*y`6)XV9hIxahG#TTI0ApOu^tl@tLQMinIu!;FErdP*5e<&)Iu-|7CoS z7ARgatJbAIdl27muKtib8l#xV@jwi8tn-QEE44)@G&f0`fA4i#sESGSusLh>$%58paq_GcWSY4CI1sW>Dkk}GY@ze1T{uGC+rg=W>+`pT_-PEKfv 
zB+EG1+mmC4AT*1rl4+}HS6t+zy^=WmY|R$*jir2g?P+HdUJl~#1c zZO)oPU)VQq^;>_4b>)}1I_UJ)hrj=5ygq;lzkgxzGgSl@zuVb4!(Y%5Y6 zCOrx{MQySAdNxJ}>(Aj3T%#NcBM1d2zK)|~65@49YGSz^V^L#D5bmWhnBHV3g~1K#ZqlXKy;DxKO0Y;L-jf{k zrcpa>3OyUeD^<-3t36WVh^1uk9J`HhXj^=U1^>HIWf>S1RjEBY;BaIQfhnG;`p5Au zuvR=q3t)^%`h<6X|6@@Ti0yRDc7M34|DEMsr+nX{0>xoK1F-(LFVoW^^sAVUdimCR zKLNWHrVr&pL(*WJ1P7;Z^E%T`8H^hYwpYkEkI3Ub=Rms&wIBivp+z+dm#(pT^-ppM zZZWksQeR$3-wz;OOaP3=U2P)_Zq2{ALV)%*6ld^+K_o z3Qnjj92-fBXjpI-F|E5&wuOY2ZWS?TCf*TA_63RzhwFd=JQfoTA(Ug%)n}U9uo0ET zM&4X5;EFv%=TXtVc>F*$4Czw)EX5ZR#YgM;yQ1NYyGAP9TrenqoAG8LG~tlXpvuRTNf8(C|z(sPA{xhI>x9xL(3KQw^q`Gi3~IzCs!p^-XF3$0p?&BjfBc4z1zP#|Otn1p-h;1S@mK9A?U^6C% zd(?R3RWy55R_1jA0RLZB@85AJ=oXluOaWnX;nwT}UC^=={2HDWe}{boaQiNZ;3MtK z89!5ubAB$2^;rI*C)(@zP=9bvyCx&H$UnsUoG8dSjI2Rm(!|stv!|)w7B}0a-mYKO z$xbvjaiSpMRFHApk5W}FJ|`*=Wn}%FXaSq!+jZ|86*xF<>axhW@Ey)_-D&e{U| zl5<*quu%1=E9>4L%>-b8*BJ2rc~(=M=%)LYbKSzer|XSz!`e;lf8P7Ui0dv`9P!hH zPI0gC9`o-ydIl|pbbg_*%M&T?{;9`1?qVSq$FKKn^IH8You=Z(ItlZ?7`Gwa?zmY} zNBEVJF_w|tFZ1JS1X>d4{ii@R%WVPTb?X+3LLXs%kH1STiCfTv(KBih>g?VQz2K0Q zDxx;E4n@`ix?GcNK3ZY)jW1jBK2;OF&IYW*tk!EZ9pt5-BnM+drP)Ew^vMTOyJdCD zI`YE7ll9{X$E{E(O~H%;wcl9peC+=*fCoZ0p)%@k;5G%cxWpRmLne`jv>> zLQ|=`!FHtko!kl&C>N%jw5rQp9Cv};PXGRU+~PiIm6(|;S1w&E!v)b}chwM~fe~r7bf16vRpHtcea0#9;y-Rk1IHJn zsGqi>4#k6X45qm4PsA8(EX|dde&-hnyZlzZbkbe9m05jGRA{Ktxytl1m>_h>65S8j_u zW9i`GJ`-7cujT0YV9Fx0f$t3ORz7+pO3G*N?A}pHc@t8RAgd%3VlP1(= zX3MrUbGyQ{mIV!k1Mjk%OI_BGclN?gTYdVD-3Kw~CKjF5QwkIau2Jvjr$ukQUHdM21XPib`*0S2>Y`9adi4ME z@mi7?F-9=0Dvcg6Lj{xyF+XK26C)gq7GSm3!L*VFre>Y09jxy+S%?ts-zehZ#P#x;k`}ce=r+k{O1h~y5agNE#wHz6u`zL z(WM^OT9R0rAF?7@;&Q4YgUQgIzf?^g=t#DcsPxve9C#LRG({q@fxk``1ED*+GgY5P z$_x*1qqBuWA9Wo|w#`!);ObKx`32V`cr1yF z%-!0cBO(MR^4}jpm@Lp^2KfawxWewc-%1O6m|b(sw{el}FXK}jyugOQ1BcNQCI4K+ zQu|%Gg)-NmfZh!!nFSW~fJhkL>zzA+)T?2GEn~mDf30357x}T2VFV*$(G_{KbWh`XFVrfyOiwO{OQAoblS~vJhj#!PwaaG z#OW!Q<=`o+_e@zv%(Rr0_1GYV|W5 z#iMU-JK_v(eloD5(xgIZ@@$7QiVQyul6Q9*{Q<#L+EAy~9tsT!S!1_G0XgG^Xq;4( zM_N`A`V|qJIkY$r5I2cjLh&SunG6||YhOXJsTC!fZC;+S(->DKkWK~tGbj6eH22%v zDG1?WLXMsXvdAL{#@X5V5=sbC`{4>!w<<2F)w`6tgBGs}v$6Y{4Q|z;bo|}fN+}kg z7LbCopCRw+U-AYoluymL;L&Eqb@tS|)%?ixK|rhY#FC3*Sy^kj3o(YOD*=X)Y-~J! zYhlz=b~$BdXz?_pET2*?JG*eayXqUPcvCW<=3J(OpPUK;bA`37M!vnEekyr&sC-FS z0~TcX!H+0YVzuat0K+ihmXh50lyf9vCP`D*p5~tdkG<+mDPXCNOxD8kKNFmVWlnVU z|4Qe}Bk&-`Oy#fT)oe(6_E7vbM!p6}PI%*my9{;2n-EMnBmA5N?|du>h;Ymdvjbf0 z;L*-py4*8jpTLX{b=U7wJN{I@7`dA*>h|6t$75jTmH@?0f`eKSF7Kv&Np?wQl zkIMZSUD_BuQ2O!;3egBAp3p2Ctf+zqg6q;UDO~JwS5X&bebU+^K%yYGAYUqDZuRYa z)M^M%Uw|S<_y?;4wl-kitkrh0bCHKHKMXXB9(aWGbRZ*-KekwhnrI!`anCUOZkDTq z8+~s%q(UGq{vVGTPF!%e<%<)Q(?B_6jVl#HgD3JYp@2-S@q2N=ItWmh2n4JH)o>_| zdWt;Pc(&1iKUqYK84AUNnLJ@i5cGv$AeaO2;iQ^Nm*FaxwvWJKs@xyg#5xB`B7#C1 zlx#So`OEBQ#JgwEaA6j>82PxI%%9c=NY3&D_zcOolX=P8^gD7$OH-wHGxL!LEuNtf zX7la^bU#nlZ3omfGWcJL(Wnb3}TnsF6JFFDyl+tjU$j>{1fAuEpR25_Jfd1=#Y=3tbyHnkfC}ZP8;THnOob96ZY-g~ zZm;G|Peh~DtfXdzSv;3+!z3jNj0`esI{_{*`b!wr2VviZV(E>?gUJqSfv5iZ@-is< zdEU2EO?h?MCrDMM|1iqGKe0?sz`E_|ud;~}e2Uz1`Z0Bjh*d^~7z6HKAxBkaA{r-! 
zDF`uBDAyQu%?(!t z2j4ng;=$(1O*-HEWVUet?+)t}OuN6?|66!wtQ8=-#2ALwqsQzmryY3bH#oUgfbdUR z)Z3J}y$N(txMK}<8t2AKl0nSm0HZ=uvHN>H520W^-}SL**4UW@G>g3-%w^t&Zpl)i zUcP)e!6TK$`uDZ2V3$bH#_%?t`7LI6V;3a)AC~e@g%YP>2LDC!;04X?bb>SPUODZo z0J)$)L_JzG&*ldOuBedOCt?g4t;%!3rWXUHr{4(=Atx&-!_=O|1ZTsxrfXDIK!#L; z1a$}m<2)OcfRfyOdy@xy84TijU7sF~5O}bj$ss}3a!z&{ycHK2o%!dY*|7jcOM+;s z2Z{?(f%Q~mjt=?n_0$4O%{0((>yN zkXu`<+%j*6R}PYbRs~9wX0045JYYxQ23#z?&p8}>q0V*DA0d;MfFpg1Y`keBD2LVj zH&1cku${T@hdZrI{iRhHyQa8|+l>LN?;7eP4zYPhMoEp5*sOa@W`pU#<^_a-$*%4HA z!_23(Wb^qi88WF*ET*$;D^~xwG`c)Xov&&UmSvpjnwYFfJUAeF^Owd(v>&I+>(_=u zOFU}s=H6L3MrtNIlxwPLgQD@bul(h|{!nAACR<1V_fh%o7kN=8!5O*ir2BnjGff`B zS@$Ir-QuYqC)x1xncmwwjwzejpKfvBHYyK2-@-eum^y5JLxWn=Mq*9Qipc4$7~Rpv zu@kj`lQZ2HAI=k^7RnyTUr6ZzN_}ieU&RS_-&Qq@rI!Pk9<-y3`tgm4)fus|YW)0M zgOJ4~8VwztWLWIBNIIA)idFGx2=g|NddW9Y^l}EJFB2SFqDQfN_BLE38-m#x*5e5O zbAu=GfJj#2DEJ46hY(BSod=#87s$F-bbYcZ3T5Qe!GncN$czR5mnN0*fRAl-l-6e7 zf;ZMyxDgF%;=ziqEG*V85AEQI<@4wkmdeL+kNJKgacDp%lBAQxgy`K;s>XMz{IE`n z2RTZdq=XyqHJl#Kj`wVsNl|pLUpHbGxlfnze9D0J3*k)BG<0Qh7~1gxGn$!c5X<{f zDC4?yfVVU8(*hf9mkt&g*KjTD0lqqspwqa2Vkk7H3KTmY(YGz zQ)QMzuA-^bQ7V)@X`bO1TD+cUwicQEYvkOL@2S#>YPe}R*q<}4==8t7{;zeilf?qF zw|SMYq?_B7VgnWlEHn*j007$(oCU{nkB{-@Y<&An?XW&dHBX4+g=gMd^=*^oU8Os9 z?wmPu@2KU@UXfGgX%Mv4Y#`ZPWeVA^U=evUC!7BT3dw`xkIqEZIrFtv@#q4d|xq+V*UV zd9x4k<9&C6GjLuHF~+*@1&7aHVj&2Z26%8pyJD*LaCEvvTgAa9A*oJn1)v?nUnqMe zE|v%gK^4D@CnE*lYsa*vZid3r^-Q526! zjc+fU1?(N~Ev@GUpRj2o#oWgG7JexiFzy-s8RaIlVH(c35;F{iX1X7wsHs*A>@Af_ z;lXm=Qp|uH_8$QhyNNs>i;8f zq;w=0E&_WK%Tt*pAL|#KF6B>NzBAuC0gk8JNb_g>fM9au%Ek-)V?nUu7=_s4x8o## zJr4IatjID;0?@xy$GzkZCr`$`+e&u&r3Z0#K9FxjxQz{xb|>-=%N?r6i}qCyzG6CA z68N0`$Y_N-i+Xa6a8!aTgYT8wNkafY`?3uI22YSg8MP+zf};M~ z=l*lUJq50H=iJ>024Ooyh0d}`E%#wrXQx?5TU(7FB-OQnpLKiY)se|j5|SIvuB^P^dB>W6K?EnhY61>iMexKit|>hRA8vo%06N{JOAh6=z7 ze(>;G?DFLSC@YC=drR!O(d*a7-t?bhM<#-ZE^F4v(pp*_tB;_m*U5&pzt|q2PYZN^ z9QE8VE&5`U=bkhrzfy z_A=(+;xaP6(Xp`;$s(?1U;wnr^XGb}Cr6c_9mMaUfWVkq`QKV(r;QiLx^4Ti@|R?2 zlAwLR=uFA<;rfh){mPJo<_|;ptrdI<5y5J(b6*ImnCz=JONNHEDoEH&xfX4*$+)i@ zN)eJ*HhIf)H8P8Ff4K!0)4B? 
zcsESP_2kJo92Baf%YAPPGOGsrUkrHZZNF~iv#|(4gd=pd-KR75Sk*nY z@HlB7we=6&+@1-jt#IVR+k9E!VlsHt)qYxETx6C~AmE(f?s1D9f>8&JdtqkPJFcZe zv<_+bsC599j>!nIc(zAdAV@q9Y?I{`?u|QG0pF2KEiCMQZ)$p4c_343xAFnlfosY~ z0|Nss*LfCS3H-go7D?iB5KJg(oOM}t%@2o-Q4`m8N6qim+711^?+J;B2`edpnhrV9~YwafQa8@HW6Gb~7htr#m38)W4>swW)d27G3 zl%eky4sqbY@`_h*{u?7)#a=IN5MTKxESP}~;PSKd-i`qR(Fr;YC23Idm*MA+8D34) zUdC+<^XMSC)JSml3&pz4Z`@C-2|yR2;o=QEb5?*{Lvie@u*&nz59EBki|?y_h=yu~2rkJWE!(pTE4srN1dRN{UulfZYC9|pO618G7NesC!<)C3ct0oh zpuQ`6BG_QTAr;v~%t79SlQ%Z>;8sE9)YE*&rPV{)@LHmIs7p#l*Vh^8mB zPoB!2isvwBJT4u!o!36zavW4TYtVJ6q(Flhp4la^AS4ohz_0gC6Q5p)lGd;nF)e$R@D`SfdG-s9j^dT8Ru_0F+@igWS!*QzNLlK_34)Gxi7k0IV> z1Y}Sm+5fgs*f7ZwD#gJ4pS}|t+cEbMoI$8Yg4MVID*E&(*kN~pU1*ab!;S=($dn#eENhgKl=Hm>4PW;<*qJ7=lmyp@`SK50{sdc z$u7=~Bsf!huGA}Bz5V@n(Cy`5{W>q05@{=k2iyDM6%0&?mY@sCB#2WV7-sPRJA&ML zNsKW!$Zh$z|4YS(u$CZt6m+EDtb*ddw@Z0>1*_}oc@0+6l|$<8Qm3!sx`gKic3kJ5 zw>r#K0Eli4XRNe+y_D}S>m$qE^K-+HHhv{^wE=Ugf;9K>MZgk@NPU=ZzgBeM>1c1S z=ectvmn=3gx2X{(X~nnphNVK2T};9l$!y?#-lY zKt*nD1JE+PMKB~f{D;WbT|u(I8wFy+rtyG_U{FwB2^Oa#MxDIEgVE8hTK(G|FXP|^ za%{`yR#{x;itOyM&m^Qdxk4+-GJtM1eU)U{<$Hai(lv!a$fmQOw_kg@rx+KXNG(}5 zNLDsn_GjI}!>}eQi=6=&)dAHZo$k{uIhuk^%|KH?}*^i(weEa9I zs-t>(jbo0WV{Aozyi5Pt`;&wlS?<7K*4&40-r8QXt#^0fq%T701?(;570l`R)}J3r zPE_e<7%nZStwc!hZOxvTt^3?VGnz9^YQMkg{iERqyfNvM&dD(N#zyf$(BL~?+6yHUN3*eZW)bpIv>QcJ;Nc&@ZX$FuMf`1 zr7DN=x2@!wU-jTzniZYH1)9b;Z{9rYtv6Ni(7 z_U-YpZ%k~gBUidLoo(OjAHZzm84|Rmfsp^6kQLH%W2SMYq_niaacf#dL*u$ircwHi zqwfXUl=8Yd;lZ09O8m(guCtVHJF@;NodNmDf|8PcFa?T%o1D5U3mgj{laOEc;-_p6a+QeAHTW7tFE?^ z?UM}An4ON3RX7|7YVPbjr8%zjEr})NuGl(V7UFZ978%abW$I|}zIb9A67!9X67A0d zGQLv(o$>7@#`1!A3lG8JGy+U(PAR3V3y|;zFJJaZxY|k$H{U`Mj`pv&3W|+ZVFdr{}Y} z`u=3Sw8w1kSC<6x@p8(2!ys$|Dx!+^Tk1_~;=2>ys%@9dR{FCl95ytVtm2ryxuX%w zeHl_SfrPHC+!uzPflSwLUN%XicEf&Ake*~@t~*iiEmK`>TMU=FK9(|)I^k?K#Sc1} z_}C+Cg&E*%Iivf_39p=^fSmrx0|E6ZH_vj5}FtjO~q7;~RLe0J&{@%m_S3wG2wHqGFU z*=fCjcN%d+%51vM`4E9mq2}UJbE64}(~a6MU)r(#$*$+8M=hPW()}=SVu9Hi<{0Sa zbyknnW{+SRo{B4+Dx+ZjsMG_8RbkWNJ9PKnNJmuHN+!Mjp$WADZ@#P}K`!|8cMKa05F6@!xDyZA$ZVCfj(fv62gU30Je~%N z;2GUFIf}^@drRH^bJX_dXU9k5Nse7*JJhY3=Wv+@h8}Tu{C+0~|0kj_n?EU5r-s%1?=oHWWMC3EvnqPI+iVEk0+g-w39Z zhzus+l1gvFz@fVvO+-v(#MHNZ)FD;xCVP9W%oiV3SX;CQA-mqnM~){jw%+S&zPHYl zMPoG*{&-r-`Pi{3)dC!8@bFlCKAuncQF zv#)r_HGfbf3zI#0I2F635c^JdH+w2QYvmr-kl46VIM|dOY|0c8G(t`wt)>jDvrZr@Ye2)_ISdWX{VDFxo;U$s) zEqPN}{Qkc@SCWk98bqs>1$p`CGvraZ#Z-+|<0=?uRSXQ9{|TRffI!k~*2RGfSW)l_ zy;>ym>rZ37GcNnOa8u&9t?rt*{+AY6$zJ{}!a7+e` zePU<%H6uY{B^aUlM~Vl^uW!dUv&277yZeO*F^?L_lTg-@hvdc(Lz3e2Yo z>W_MEQS@h!0;hR#<-;v}Mwi2N+Wtl72RL4KpJ^8J3Jb|wKad-y(JQMzim{2fYUs;e z%Ct@ISNDT&*T4|YZjV&!4^;D^vD~IuWJ}5=7iOafySbH1ZgpcOhQPx}2e=MAv2_!9 zr{`j;Ph9Rq#qY{2vML-J8wQTwe$xV7oV8zVu`YW#__)t{7J2n7uu8@-ja!*+fYHjB z2~FwzaD!wpdN%?#sHm*#N7gEPkortvA~rq3;uS?d8&Aygwe}{oy*-3z1(ch><1Qg> zcZ*=Bd0ZNGZNEAb%m=nUjiaVdcdL@(VwB@sLH2Frn0$0?_iR{UV!fcT;Voxu!SM@R zTNlHLxSb61{=qMU+ zh#ONO1@T4-fv>u{^Q}`BNMD8Nv-jVBN0cCY)sGwf8KQL)u@nIUrYYS2k)Ep{R_x2f zdV2WpY+ofjh5UE9!!@eWI_JHI@be_c>3alcat60pMAErURn^9?!hYREPf{Qk-|QX!IOm+A zOBp+7&(F2N&o6a@5EO3-3npE+>Sxv3T~XV!5x08uC6AHKNV0K}xl7TJBcCaRpQMyL z=WCa8eK9j#?@E;2ml~2=p!tPq;x?zrt5>glM29LZb>I3j<;x|wFN^J~?&=9`86E~! 
zF^RZ5h$0@|PskPC{}{l35X9<%zJJ{OV3p1+7%gV<5~ebq=<*I^hy!%->R>mwTP;t% zY5v5l-EnI)(`xJ+hLtpl>eO|L>Nc*(fA3t6H~H>OIF1UOH=CBSHE+%&jv3%=ZS?PN zK3a@*7-k}t^|ZZ1j3FM${}S=r~P<2uWBcd#EIRp-^_z&U z$n3~As=-S038SwvYT@53IedJ?-t{um@ftn5cAJv;5SxF9=2Rs-ro$Bu>JB4NOeVb( zC+u9ZF;zR#-rg=5?-aUcz-<9FfIf%5jE#vI->95n+)aCyIn>{L{P2UTvvaliRL#T8 zdH^+_6Yhq@eE4vQPS7qllu<-5UrSE*y75I#G`YRBTF#w5XQ6{PxBQhe9{bk3uw1a| z$EMz-bmW%X=C>v8`!g)2^)v;b$?5p+d^OaRCoxRhbUj=z2FT2>hPmV=zqG9mn*t&A zbHee9d}h!dY)||l7~1Un*$E2A;=79_!iK&iZgM;t>_p9i`Lydz)&LWdE@pG@(~PIw z@1u!W6NvG>uWT9D?APG;K#8uYFw1v8;z$gPyvR^e{Pci*@a;CTDNWJp?bo(1uk$2f zGWj`$rAA#BkJl$Ed7^w1m`LSP@09U`F9d!KUl%iMkLvA?3Kx~B+2=>!!++N1J-=*R zMz@%aiSj?f!>6`*NuBFmU#7&^8}6aVhbvUIK_&|Qamhb z9oLUvME7eA_V6s5LuJFi`ZKyBvODA6__~w04<|e$#^|y{$HBIfKsw^ygi86Pe}^XB z*nqV(kj=>B5{kN1`|S(PKan-lNAlcfGDwa;#g6=Mu@mxo5*vVEs!NDjprga+zuZA5l%=%bJ z6o3dv(Gg>$kz0y*=lqSw3JLs{I+P_;#k9Sisfzp7-we~EWJgRXi>cz+p2g|eE_IX5 z@#69JAB~rrdcO-b3%wh--m@b&FCAqp#%diV_0@kTuc+brx2J^{s16cOBQhf!sj^Li z0eHXg6V~EKCQnNrDl#%xKkcUS>M%x**|1 z%@$bv*UpCtNuD$XD8ro;Kf-dwpPym-Cm?>NU}D>AtUQ%de4;7!-fj8h@*aZNWn%Mc#> zWTQK6l2f#|5qbo>b;=AHUt%kO)3f8o6%7VXj(e{#jxJ?u@wbA*^Ry(T#b7i--+N=Wb!nngeL#c-R(#ODcBP-0fh$m+c^}k5%xdMK zF&FZR%)jmrl5%n}zB#=Ze5rEoOZ>m>WPo@rwnS)CuGY{Ca6^Wrj?c3Gif>x??`Cn} z@yq^!&D5J@@L;yw4D}Z_t$DWRzd8x9bB3SwyuPDqsfG^iu-f_nn>)B=blc|DX5RNB zF{va>4>=DEc~!r*y=$@RHf-HJ-PK;OF>kSkiycv=fuIw9l$qJ|;yf~>VINE^+73RTJ`s>B?{q*Y}@S3k%aea}50 zg%Wy?A1^)hSpw6VIi-c!<}e@>^I8ab^z)uvU#;U-h0s>rz-Ht58RC=sZC(}uuZIWX ze%TDySUqotF^Bn_fhnQBa%<8sw)cW>gnOv+BU`C9smh)(ef~6JG3c=%;~c}Kd39Xu zL;q)zgPRI0UwH#OM<`oUoaTtdModCbO04eE_6Bz870@(jH8fC6ON2H*F8Vb)xCsrW?)a^!z^cnH(O)K^|uK&nj{&cKKe>rDkHN; zPFgN?CylfNzQzIMEGZ1r@}vB1o~shLp>e{?hC+>=GAkH)iPtTg2g} zAI>{?8tDyO$G!HGoqxsmo;&FlsTH%3a>F_Cq05z48T6fM2qgZ3`zjovL~A0eLMVIr z@?{^$>3XZ)h&eQOCySnB-QTNkO)Z@3hl46kiQ2uwcs{qmZOZ5r+Y7)bH%D8+de4Fo zDOO!yF8&ved=e3&Qs!0@0M4f2Ls=d+@~G z^NDe+YHe2AeeQnGty8l4DOked$w7d@ifDn8(c_w|>bpN?W@{TV(#pXRZaiXnoU0=~ z&gTQhBDtl(jPQ^5bI_q_9v)UtBC)JXUcawG5uQCh zM!Ht7cfie|y?r~2hG)pHQv@WN_CaO^(I;-l5uf1Lby02-Gl`R zh%q~TmpOc~`dsFR03@L|LTTS!TG!yMwBf>OA;f_)@@==XbB~YC{3@@d?MCCAYdzqnCVf$eV-KiH@-O>ln4=WqVB7X(uhdl~Y z3lmq!bXExqH83M2WZt7zXw34>-Dn$&3VeqiH2)Do%jbbXF=wl)s1RCXkWJo%6r;~+ z2%NjSX!Ey0pngFh$>sx3Q9X!(^`GPPjZ%#^9@TI82JC1pNj}ETS7zGHO1V+g0-vY# zM0f`B&$#inEe?{6(d#D(R+LGX65NHnU2Km|BMYbtM=0q&c4PY>ihoh9@u0wR%&}oF z=`PZfPYRjS7$N$MHdQ?S9mOiHQu^`B$41nt3~@Jbs8aZ39Kuylys7YjgZwhsH|n(H zL+A(5Ny>RzJ@Ip9^VOd?5iwU+#b3b)+zGD=ACA6-6XQRn+>E#Ow4)_xPQAF=?Xao= z7(&9^y*~oZ2IB?$Q$7-%YltMm7rJ(N z51ec;f*o5j`L(G06^}V^@yl93KZIf(Rl5qXC4~5k{$@wy=iE+f?wV(P7xSvE`nZqF z3u9u^aZk-Q@sDZ{TT7IHpJ_aEFVa?hZvr!yC&klZ14Vw<%s*@V7r&lQjC^Ia5tMpQ zEB4noFFdEpPcilSW~|jzvZt(uH1aXv31q&U{hESv~NV>q{slR z!S|InzKB_%;ZpFOeW&T@>?2io5z{u&rcrLs`1+tzUC3%VcVo&CQL%BSGT6hGmxpJ9 z@CwaXfIfH2j zuSV2{s`n~PdptNNbg4Xqhe!DDGI_>M9Xp>uAOT@>u4=Z3KpKSCK-&1cvK z*#)Xi;v1b{{yxph3TMQI4dZNuqCTn#!&xR{mSy!%J}{sM_dr=^nosoaLI67m?t+%j z8v>67q#P1B5SqtSF_xE`>ZL9Y`bk%ZHD8xO$}(FPAj0Jo23=SppxaOl+rnmFeeSE& z=8WtNPih7i->FnUA42P>FEHMqFOHX6mK)a6`sEfAutY+`O_~^)6`3AcsUBGx{}AVA zLK!%S<}y`k1!+O!ho=PM1Sr6sj+hka)*+IX{&rxx{B4mKi#! z_~2NU$^C&aOV&p=VcJ}T8GfeHc%S5g!)m9my$a!N=0MpD!I3xAQilSeZ#Lf&d1sDCIHlF~iwsy2diW%k1TT zo91Bd7ycwW|H|`5CJd73M?MZ0*JK&e{pop?4$_~>gk}4q<4br7{9+Vbo$e}^#$`e! 
zmkyS^ZV!q(x@IEinx<|S7&kwmk)~_s4<5YC zj{ccwP~>v8>_1rmPo5MiLuj1e^JH~-7W>bvj2r0a-6Viks8IKic`B8q>y5L&h*)>n z^S`H=Ml4rDmV-FpcI<1(C?3jeetB+IXN)LePc(pb_ZwQ;)44vYjeHsewBmPqJM z$WT-iYQIR2bMWgquc$UIw}1)OA(VQ9Pf!+DwN(r5OSBnOrxmfY=&#*suAfjGZzNrQ zR9;`V!*Cxjt^9Y2^NHXKuqA zNvF4QZC}28P6@nBM?O}%al5>sLcPp=%Oh)pzCv!Nj<3@{r}S^%W1%z)LWsUw$Szoo zhW{J({HSVWptGnGiao}CXe!~nJyK(zI%d|On6JlH_Ux=6N`vZReOxMEE8GLknigA5 z7|H27Fo6DHo^dLu59v_~qD;EjR8Z&rUQL{u*}MhGN2ODL|3Tc>x3W!7&_^Sj{H+I)x+F+Q!9PZFC_VeVu5G+jsWnJPTnd(CC~|)mLWxjW7E6rHM*-B$8g{ z+)6&wGDwB>S+3PXfi`w6Uj+Rk-pWPc79=U1WM_k;LLl-f^XL20uiw7?yxrub%=zXw zez$hi(HDkk%;D<@3xk{orJl(bBKs0? zyqwNvq0Rs|U0E+MH!~}pn`4$Y_^C^(7&*nPI&_xJ=SbZFTm_$V1L9h;UXM9aXMxhO zmJ#Ml{qdS);acXHS6u(Kr2=dQsj1i6>g{maRc5fl##5v0XnM@(YLV?F=9d!mLcF}n zkyBm|3*WqR$t~XmktB~Qq$0hUcZaC5RlKO@{eJ>QN+IbvW~ZUx?8L2al7la@O8-nJ zwNY)#f$#fjN09-<;9;ek(*r)0CgMxhGE;wn$;=5pK|$RL;=uC~G1jdtQQb=2+ocI-+lR@hzQr?e95u%k(_e;ch&PpZ2yIy<06wNw3ejldH zXpOe+u+WF1o^W-c8AUi)_7>|6)=;(0K&bOpCUjMBs%^j1=o;2epHhNyH>k;L{UyGx z9et4^9h2z?OsoeX4{wwsKu7Gx4vw;xqYodi=BOOyZd~rZc>fr$5Z$RjFxJVsN$!;M zH2l-TwxZ>4DE8#|^Do?6>3Q=DcE_f2K2}N62co78tnulhKQ?8CZaPc~oXw8So$@8P zrl1w5cl!FSt0#wWf^G2K{R65E^#l8C8k9$Mb2##a1*?j!_^j)WtE*m3=JkGxYZtj$ znTy&T%q0A?=!KhP_XP_J-kWqDvY_l1mMRRcN10dnTnah#EsL!p*mrhnZ0uiNeR`~_ z7SLIdJ(e??CTV0mD0D>W^{z0v#Zc{x`M1QPlDbjkA>!cV^@1Oq@{^Up;1wLA(WxwM zc6~wbKJE)V@J=}Exb4?938uC|RY3usXHpNoZ-p$btkScy2nR=3AF(!{dNtZg8GZ|5 zC_rU?Hlj2XJr>VvZBvNe-S?esMTHzw$bKZ;d$G_;XqLvrha#5WE*tV~+6`ojke*+k zYzm=zytesDspF9L3Bnq2Sa^3DKaq6$&8hyC`z4$9ma;m%N^+g{KG$zWr)4Y`odQKO zTVmeSvkIA3s6nUPbMT?nS&a|>xz9r)1bj^{n>*pe7<$OuK$ zzj#|S_W#$BTcD0qgW|C=PE+_8yiNMWsMzxxg5RZ)lM@rj)0cNmdPUV$5v;P1%ff|- z`eJdJtGMxJ{|-n;dhc7xSQN5K)?CY?&8N z29qX6w8A?o?9S`aUz16>3UM^+7@fjraTM1w-=~U@zw;`~PnGBz-|mkd9XE0!-fON9 zug`^F6tV`cbru%Ag}B9yWC%yj&E0-ElDz$Q?E6d>4Cb0+L7(jR zF%yH!^@&W~%oam71dzk@l{_ax%*>xX)zmy)dD z*Tk*FNE_VDVsjCP@ualP&COr4wyfri6gNH;WvRGQLwjB%9s^U<Lmo z=O$i~ccryKInIgmJ=xaN*u3FiUh>V>(>#%!jDGxL+**TZ*$#7iDhv53hh2!qy!=%A z%@3HIXTcPTV*24=^&IuiCF#~ZkYQ62Y=eg31}euTM^Qc%I)^#lRi+%OhtUauUlI9w z=$VhrF(R$kr|-RKnKhm6{d+ej_=;-cIb84SvlX92s=j%D^?5}U)ofomZ8j5!TxI!p za4#M4SwQfC+(b3UmLV^%-!j)AEW=qN_1be zl*^ho_#v>^GkR=ha4I5|lB4 z9Zn&!@>A)N{{@lE#t0@&y;yy`^y5;~5QEm7Tt39%mTtR8h^~8JP0SQ3Ye0W6C`cs} zwrPdtMi+L?Too~3!R_0L|C*gl9MLOnKNVcD4>bM=Z@e6e8 z@>?v6F0dfJZrYhKKE9ML=FqF=9B}OKUQIU`N=p5=&UGR-Hg%}N@$CA?JNilw?(*#k zD1~*E4|o=et9>JUt;e*K2D?6Y#IutUgs3uE;WtgE`fxdnREj(ur(ekJFG___(5ih` zySox*){tJ*nUT6`olxr7(v@F0Uz5|qN6Hi{j&I*Pwc(idH58|bhzmu0!sw;$^XQV@ zysO=N5c1ww?bPZ=Vp@pqV;02W>hsKc%xQ8rcp3=xWUsh6BwJ~6qH8b4+YIo9|h)%&&M7dI(3zDIX`=_re-ouC`--#eCRDDQVBOIo$tVr!9AX}_&vW*gK|fYyKTxea|AwL+!eC3VE32b zSTRhd?8>qcV-1zhjNCL7oY*z73>Jz8SoxgX;-9}6!Mu948C`O#a<~pfjOS?AdkFE5 z?j{O3O&qVZId`}{6@S4g-Qxf7_0@4vwp-VflnM-u#E?<~0tO|WBA_Ay(j_2W(hMm> zgMg@XC@786Ev2F$AYB89Gz>Wd3^U&~p67Yb`<(OszCZOhGIHP7wfEX=uUWV7MTlX42 z{sJ^yWdpmCU7`uGFY{mQ&($%9$wB~)k^OmMg*qzq8!q4cGR-lfvZRS2YLwk!KTb{0 zot9IS$$V}gq7Ir9A1shiqnvLv?&NmVWW#jPcTD_ zNSZlZsrEx2?Sl_9Um`(QjX=z$tz~0(?)Mz05s6SX2^7`XInd^DZckL(NCg}SfO*lv z#OB1fec6j~0V!DwyOL^G zb;Wfhuws+7{|()(S=3s^;3lc=yufpNWH=APmuTvS^F-dbQq#alFxK__hSjvu%_uZg zU|sfMRS1Xx=_>MaV+&t{Kfmac>ZBk}@@m*kFZFUwCNmUurtW*Wq?+?f#Yz_CgWgL& z1YI(WrG?SuX=N@2-0vHStbIv+?yL&FCw)uVUfquTsD;~%rv7q30{0VTI^tvE7mb=8BEv zE2qnWbJ`UKZHo=_VyC`Oz1PK;y3=9n2U>uFxFlrPzCS#-6s=bhfjg5zO73@vOJir= z?XO>(pPY<>eC5n2Eq7`DIQ`KTxGFns^}W5lXe>Gybiwt_re; zDG^>O=?EpSp-c^p5MQhSEaxY_^WDi`^+_<-r9ytuABD6UR(8$s-;me+z)_XkQt{aS zx5KjtKsHL42YX4LoSgocoQp}C!(Eu)o++Oscv1OC$e8wR_#LUi>A5*BLJCi)7aw(T zQm&d(NZ5f&Me6fgN|hUnpDcuJ_%l@Pq#eJKTrqB=siu|t!PRHp>S-X!;yz>aBB_!( 
z{7t3Y&4~CqL6yFvHVHFSEnNsit^n{UEKx4Lcf^O^BExy2khsP2t43aG{qvvxpMDWUuRc6 z1oWUK$+Js@$DUgoTS~ssQjT@w+(oS<)ooiZugVa{?A{}1B!pMOIcLrm`tPzbIwe8e z?TLaWwDK9w>C@}pE3qTOE=Vr2H3L2%TN4d-od3KSN+c)qs|{GM`5jH#k1~XIGXHf^ z!Cg-zK~G0JnVWM|m^Ow3!Ii_0OKNlU&qoY$#v5Z}DIYw1`2KUsS%5t zKo8D4TCDlrLu!+{S zOd;R22-rHud)rbe&6e>@+)A2#dF@edMk>E6s&NquQ}1U*r^R%h2efwQ1CQ)nKB}`X zg{gL~g-mKuYUtmkty2%{W-YP~=~C&2W=9i3e$YtH*d@HWuw?U>52+k`rr}KlDT=_R?`EOuwZIJ6q zYDaXBHk0+IUlf|@@x;T%{^)rT*W+4&bW3v58@6Um;ZEF(_MwDKZomp35#s%XGAG%R zs7}yjhS_Im#Vv#qCGE;IVD{;i{>snJd8^L0Z?6>2myVSuD@N^Vrg$u!rO?roFf{Bc z1c<&0olnr1XnOo#3C7QxM5@o42&>UIZRR9F*xsDDNwdYIhRK@vm0?(W$Mr>jq}x3m zR!S${+JFfMKaHe_=<)B@7Gw%n?+8;3Q5k>ug8-9KLxbKO_45?X4LtNVSnjHCy`P0@ zO`&Cn^ZLUXSvZ8Kv=`&~sF$3@@n_`%BcTQjs8l++PaOIeRFz#^M81CgiUgC!mY`v} z*kn|QT5%dH>jXQ$s(>lfomUdeLpQ!Xk;MO7kWsF}NwPM~3=9}V&09*SFfB*-KI&O< z8&b|R+$|e)z6RzUNI-ahq=z#58=mgcA-qVu5V*Ca4kuG>uQn(LFi}{@%|kQn=HolgP)Ng)y^w3jsF4JVM=*ZA~?oEhk zOA)gRe{mttYQf5y8<-UNa`fAPO60-MIUy=^S;l3a&Q2wvMJS&rGGz#|j-BH@9f`_) zMHI{D&S~deKyvQRumo`g5$r+7;C_)d8_LOPT@`I!-bt3uFnXy>Q6umuT8XUMqxzCx za`H=rXOWxR$OU40q`m#)^M$(c&#w@DQ>B5hB=+{IZ&9lqdeHhQrUaFJYkby!<1t-c zzLLMAbZlitUR#Ay&ECh)w^Tz(h3;Ao*6L2FE(AB(3NqjHkdu2;CT|63W2PwX6Q~H` z1vX*{+icU5o;U5SU3~I5NP`g^=BEJ3q*@bzuW8P29<_X|>)`6zn_&?WlP&A}URGQ!A(daR7t9Io zf+aOpz*|pD_4GQ zn1QkTklsNRWo1=MruRxtEu?RYir@}zKY1Kp8CdyWQl#%X2r6kF_l`;nRYPM@L6Z#y^0^nqd6S=DKnv{FgK}=ZQz_U;%vI zq-Odq^Ul%+v6!Bi&-ThK7F1zGaE7Uyf6ImxWLP`cJE?eqe8VpLN^x;B*|(WZnYY@+ zTLK~WM{KU`8EP9_DFJR{lYB03@8;yDM#s**kvkgY^WGULBWav$L_c2P_yx~vGfdRW zy=p$YH!|FJyLR)+>4u=o9>h1qFjI~X8Ft>k2FA{webSM$B3xgcl=DgD^m6Xdy?F{w zTjL=4UYiN^qZGSTu6gY}VzY^D4werUiK4TWCd`ModkKma%I2^bMY62LdwwYnGNC!CD+#s_Tb%S(%t0>1;pKTh9I_7geQYQ&Bv7GV{dIm zg|1-^f0mIi#qS^p3yUDtCrH37umZi**ip8Ee;8q0n&3cD-w{FN}s1sMR71U!OHwMvfBpD+hXBCVvtB#!ZhNfR-Y{R5C5##1^+C8 zpN$F|2hUND3evuBeDJ!UhMWlrY}7sW4Orf#x7S}CK`v-*$NXgTvd?B>17pcc^7}?+ONT2kTaBg(hNHLKB7Q(2*>(cAvjRJZB! z7pl3b;b(Id^5|0%&g;?_cf~1qh{F&6Jm9}oJ%42Y(kKF$VP+`o?%l*yWk6~bMB{d! 
z&b^ZQ#bjJxw0EVvMA^gfoY0iqy~*DZa~;eF=XyFv`l!w2u@a@9kLI$z2e;_dPyLAz zNnr%g8Fgs3ekAqVo?9xxmfH#~el+ZH&X5Ra9cM1wUk)?lUzw)FcZCeZB`9`&-5 zOQD*XL`^$Ds5z8Pu*L!xtUEb!ERK$jnmRf@(G`!Nf1k{FBk;bUS^2&>J(#d~N^*z|FrrEJz9 z`oyMhe#e7&!@U?K!w7CoF;{KdzY6pJEwSQYCB$C7Nr!tO3CMIzOU)4=39aQ7==%<2h5DQRJJsgde&h2`-%1rj_Tp+l5I5mQ6ppX!{(W%$| zHe_4fLgW02EGx~AdkI&s*?2qCJhxVc2v>}Ivn=n~P`ida0*lV^wFBam#BcsF5q3wC zB@wp!NU8qG91IY1bil}5{qf_=@87?_3Va){m^RSp-8xK*MF$55TNyJvTM{tIOb87G z(UW-`&?1(=nj9oyRV^(F{8;-H^A@A_{>R^5F3i!C`Tm_XX0kwpo(&3x>Z7D8^Nr8R z%G|RsOj`V8?Jqw{u6HaaAw=uAG(y9{BHFoGVQ=W!?--X!gP1^p$DyM>()3pf|NEzb zSL@e8f{w@6cD@&8@}GhtuuFhyU*7#wytci4@!RIg3MW+qms?*iHMtkIvECvaTHQ+u z*Rv#PA^xQLYD&ZBJ)6~sx0C}|JStA0_>3xyED?!qbkdnrpwpGTUX3@@76)a)-${_fw*zt0-uxH(MaCGbh3KREVpCLeKuBe6q z{>(3Be1vYe&cN;ZUvNoC&?+e@QJd$d|G9e1sziyQFaWsGz>PE|Bcqr*B*YCE;ogqI z0AC?!_oZ?HJbClyj3DmOfdpE3S>AJfsg+j^!_K|v_QYX$ML%l?i9%`ksWM|QXFvU( z{GwrDxpkco_nFy#;|>1h{tfT``YCwHO<~?9*5_sD{8^u$(ZOB*@3x=PTr0}UMF132 z_h@X`%@{2p>~wfO$|Jy-JUTM6@s<@=wfg${pP1o3Pr_VUG;rE#w(3+)IQ0a?WVmJ} zRXhtrOi=@QWK`5Uzxk07eW=hb>CnFvcwM}p(1_MT4Q0%Zjz-PQn3^`Y6M`LAoeN#j z235&vX_3If@*VlpZ!g8+DR;#FRR}+$CDiB`+^Im#!=M0Md>$Me-zf*AmerT%d`2G~ zhVDM8sm1qO{`Gv&F+j|LqYIndR)`7pQqbap&AQ)I*CD$W9uyQ=$S3qSq&<0Y7FRi8+Z7HNU61yl}^{&^nnG z8ubG~KtguX)z{a@A(Vio>my(e)6fQIC+P*Mt0IB$V?siLfS_O}m{T&`1f)3b?FQ$$ zDzIp<8*F$>e(~Zh5h2nQy`v}>aarA3k7(B;xSu*g15LfU7iyIB-$+m$(MBO9V|eLK zFd!zLr1>6g8eg4019pg`4o`r+8u`VGtvfQCiYR{ktAH+9O?S1mnQK{1MHdus(`V2Z zmVIYR&u&A~Gle8|2pj|+7tW86Aq!YzGvMg#7y~bLmbwVni#LgQUWUq-r4*|Lb5BK= zakS3XJk6^tXXMzr4&QZEF;%4QO<<3(^-w$7+oRPGps=m^b4HMG3Zxak+Prtk#^$DK zV4!S4K|wOmY=~|xEG%3yI)G=|jo#u6d-jYp2>4e6I+aX9Qu3aKMbh#9iVaYo3q@v& z+mHqY2fH{!lRvP#0zMT1L7YjBi?w5Dp+QJOJX2F@SUKm30O1=^UHAO&M=vJ|9(}vQ zUW>+wAot#B>GG+@J^Jsscakx31y*O&Q>58q=})pwIT#UU*3(F!?FnarbsXf17D8*^LFFGD3rfr{w%9mf)JPV zq2|{19dN>YAKRFKuG4t;V<#u!h8GXV`=>(R>gK)cKW{c?wR%LM*6wz-`#CgcPM+K0 z@WrdUGW*moTb4IV{z5{kh`Jqtxu1tImhJV$tgBDdV1|;IrcA{2Nc`1y^GGs+!h$Xc zl;K%20&G<~H;Z}AiZr{Kc4F^>kE!M0$rpo&?*?aqdu-20XGwVwf!wa9uKuiv_*<4V z9WZ9U&jnLeB_Sdvo~_&;;{iAN86V-Cir2F57}zfe2WwWq6yTHJ$+b@7)}SE7-~Ou2 z%Z-P!ZH@!YJcXS>2bsl0`{z#qAKcv3WVhvMXPo{lpnqQ&YPC4iC8L9~duniu zQIfhSNUqW%{>zyN%O$TlCig#a`S0-Bah0oM9u4l<<%sK%nU3INa#Ca0GK6Eg^54HF zGB7YGadc2|@gjQ!iM)}->Fs|me_E2}Y;N)M&N3NXxILQu_|`2uPovp)cF$kajSj@G z`nqKmY>ocG!88;zL3e-4iH-1UbKiJ=h3DI~;XyK!R_~de>$E}#+^emlNh&@heNLa`lameFJ1HD!)~7f76P*T zv)o*$dJ;#uAogK}X^gl{uR~o_^f;yPB>wB&cSzWFM}XM5jUQoAZGCR6%(##!T;+g^E;lzr)mei8c?`|Eefa_bxG={oh~ z$6Y+1AeULrn-RfCFCVO?zF&RmpsM=JNijSI4V`ifD76HVVPz|~zi;2)=>ZRM2r1lk zyFr;oK13Bwf!^X}-XqD<($fc0qWNV}Rrb#QEvWto=WFs#c#IUvts^e=OZ%0JvFHo=}T{=Ts2ogZfB*J9DZ_Gl!_&h{d$um16UQ0(IG$aBacQKl zz$Efiu5x->#@pQPch=h1uhY{erYFnNVA$O#VpK-ho)f6Osf(<3{;$NMME)`qr?fg* zO$9zA%e(2LN%uqR`m2UtOcup(b3xH3`KPy?gfG3Gx;Hd3 zarR?tnVQ`jw-GWGLf_3y==bi z6Yt54bmehTt>X(GlPKEy;GGbU>7AW$Ls)%<)!Zz3#&w~e(3E^aaw}b%pKx%sof(=!(qiREcwbutN5|5MNe;@rP~qr!9`JgKsB zU?-7bSH7=ExA$Gen8(UacPi_c_1gP4)-#L5P$*PnYBZyLD_Vbt`f#N=1R3OhtqDn<@3?}i~m$)_^y;4{u9&wiNHurylP7tsc`?7tMMz4 za6l++)=#vIw+J_cjg1=OyFpF4Rt zQN%BoDyajqFZP6?^>Ay!RTL64kUZbIYrF$)lpmY~Z4@qRsWs$$fXr(? 
zM9Ia3auMX_S}4p-mi80q-?D_l?|MvRkU4gS%#k1LE9Di1Qe8;S<8d2TVWs5N;XS4y z`yP7(gd~1CM3?SLNZE%E`ubfOYHD17pb{4sC(lSjdMAFc^P@$mY=R9JOTao)${i6J zYbE_w^6|Hh%8*0hACodyuiouG*VgqRV(bY>Ml-Pl{2KLc^A$u`aBSvjk-!i5Q4|Ka z;(C-~AKCvM5#q1mBf=?sA0@4$@;N4PXJyb-actQcl5B5t{NB!Qe`}nEh5a4T$xd8D zyYTpN7RrFQXL?{IC>=*l6V-fFsF`)@6_WS9fYM$!tLHjore2$Id7{DSim3#&aOA#0 z2dr3{sF9a{;N5C=u2{H3mK}{kwUtVXnG0(P^Y(&vkx& z^d_|w1tl+u;`xSP1;cln8!lh{CiD7kN6pUOTHRjn{Zd%$?miFsqUp|X>_1**xe!Dl zW5nylmrqV{@x^XRo910*t$Q&I`!^Suc3NVX%tJUrup!UOJ60I&qran$o4ZlOr`OyS zin9kMkZ#3E6)qT5^X}=ijoA{i>kL;BxqwLwzCnm{=TLHL{;vYTa+LBemBv5#_7dtiRC3TlqsgAi)QQYuFl1yX_Vq#2^I8}=e83H3c%nAWsg;t$#) zzraN_5X`KG$NS2}lA1b>Zf=Hzn*hcd&v{~TZDZ_3Q#HLmg7Z>e+mjq0elTPs}a^dDBCkr zq7$zoF<@&sza84^=8`KcuXpj>GuAo0i8rW>Ft*At55k$AgN> z)-BtE<_DU_Gvg!LIX$1f?JLP~Yew@thkmgt3M(qoV(Y<9=08U+RC7z!WiYt0Ne+swr~aa`FIs{(;JIW<2{m^=L*5_GsMO0PXF{8bo1#wJy>k`!Q9QE+@yiuS_7*bE~1iu%0LW zp!M-fMoAVKTtC`ld2zEZxHjqjvk$Wioh~v@X_FDo8!1bZ@-_E!Zc5K?7EHHPxdGNF zbKzs+|AI7sAx{orLhSbMou`kZvFoLkKY4dHCqJBA3Euzx{UrXc$}6~R>*Lqx8~7Bv zwOc)QMot(4@mvt*7jJyp&&0$3LbfTuj)5%1c-nTjp|G4`%f-dPiHWPc;mG%C|F7aZ zx0_7$yX~TMF;6l+(dvBFqr{i!?qVdS^C|xyt;MqlxLHE(u(&(aIe|G~wDBvMROHhokK zNQ)-#wHKi<;D}(vMHCWZ@lTpIh`b4GVcVRoXA&M0?ZmwCG9z-%1Z-_`zKf%)nv0J} zB8S|wEsM?W!*K3v*REM_>6h+GdLZ=f=+ajQmc;%PtSGK()$5rfx`&;g8ZICt7^)VB z$0iQ@>*R=EJWN=mnTNl4`}pG*FNWBS;ALN{2KY)BueM2*E&XAJwB7*3`?F-m-Y})~ zTl3sOP$F@Q3qq7Sm1JNY0VdskF(_Az%NOY=-Q%*_T7$1{kliLR<)VcAe6Gd&I}$Sa zG_Cm+gx81bPnu+cp<;xDo7DYCNOfT9$4cX=6%~E@SJ8IIn-(f6#8ZzR8PG#%m^odj z#ywer6nABqa!j^d*oB(h($0=I;nseJ(5~yULNKT#XQ)?Q*nKMLnw)myB6Cvl2L=xp zHnswhJ@&Fc1g^;NDq^DybNWi%T*vN^w6%pu1*}RXLZxGZzKGtHRqWW=*%1rg8wa&fZ=IOg?`TI~OE%%Ox>R}iI_mUcw1 z12)i1-7^vuk?IO-Zcsg&T@08V>WrCS%%*W=z$<=M)}e^{f>{h1jKu<|%uI}l z>QM|ha}nx!?eh$(-IX*UxvMH7{>k*5)G9=34N!VsYQymGU!5uK^0uDAZqgP~^VhrB zq47cEvVnLqMV1DE$^gj1iWk;E&1NzuoROYxHC|yR1U8xOa$jO)6?Yuwz4GWIIdIRy z!@R?Q-*9M1!?n|&e-5iV1#-?Gpm{X#;Zh{Tfl-PFAKd^k)<1h2KYa4%GbMPWJ5Y2y zo#kYw#z3ihAq3qa4oQNQ!u0*xEQd_Xf&(u0CPE9X11M(&ncbY7X{|y=%EQ;@^g7W8 z#`@wOUsWNIz9q~pB~{FC-Yg3}*-5n*xTb6!9L7D66dT)(rmztg@&^pAAh%e{L-_jF zOBd<$egto#tATWm$;uDN{wc?%Rj)#6XgIk8{Hb_8p&wJo=V|7`^dHCv+@b8%VUC7t zLV)df>-(BZZ}{F!6@T3mir3LoIVswe*jcAtv}g=EXZWFVJxKW6MjQ8!l@{lOgAZCK zUsi6>&lM-ICj1)u0*_Dpc|0k)(6z;<#wXBzFY*2ROLiR{1hF%Mi>Txk6>qe?{}oxs z1A#j)zI6<@gTK?_i{>cruYu0iUXXr}GNiZsrlg^MX(0niK35B`kGor_oc~IpHoEn&d{L17lpq+`EL!aQ;-XYq&L&0x|SNX&VfA-NA!SB$Pf!SEbm z;d9yt9eP;P2E8QPNO{3cR7fzvtBjgtVWo55XusR*Ee-cQX_b7e)Af@jHT~S_^Gq)T zVvdm0q@9>hoZT7wfQ*>E?P5(@O0cvEvB0M_xL6p)A?<3%CJ`hBa!F39jcrL({QcK2 zUwbOpn}z@%J3)Qt%HDEi+~_Za~p_VMo)MP1!Q`BIr6 z^i{B{CoV0G9xN(g0kuUZ*e1uf1R%0Q`E{!je24!oJB!nJ9l&L&*xJUwuIki*MiyW zH);!#9v&Wk@Y4-7P8~l@iKlYpDUckQVd`Mrnn=aiCc`y)sGl zw@wLAi9~-6Eq#_fhuXQG&r*Se{AwMrgC8D{?(y443$8xzb0uLnhWQhjI(=VDA(%>N zG6YsPS4}zEh&8KtzuC?T#k+os}DQ zLlhK0fT>m(jsbdhNC)&jcj$ZL*$asRo3J;3-Ynsf@6W|`vr$mJj5x>rOYeDIHPp5W zHGM|woh&wR)OKIJs4g>GSVV|nH|W*&%GMTNBFe+4`Z0_7*`&56BUxbdEsf}cfSh6$ zS_k)o9~6+U%|ukIA%h6Gqhzu5?<|&=oSnpLbbYZ#*zirBPROT>JFrvgoka%0dbM!M z_d=!asuF_ka)NCuZtk9xR20evYKP?=;YJk7;7g=0>u71*bx)4h;;Mr+p4XxMm@MxY zK8A`UVf*=D903OWhwCm-T=~q{38Ms)>fy#-#ZJvHuLEWKhtEyk=L~r*5tBNDi0}0l zDh;-ZmWm`y_PYN%z)%1G2N=Kx8fLiF90@pY`-C5V<3JTGdg9pP>;{e^X;NtOtxaY* ziHFZYoa;&;*eM3R?-}*2;`;jLX4cx;+Rp<6y&BNqNU(aZ;O$#=Gt@X>;s)?coFW*8 zewpr*iIW8o1eQm+XwXZ`kK1FL9ru(d(`yezQ$TCd!CgG$S@wE%dquE{%Y?Y zZjho=qeojO+D|z>D}EK|Cl;;-eLqQuYr~m_8p+W~v7xvw5in!xf&B&Mo{i04HKr+_ z>hlj+@7z2++~?0VOInjGh9iQ*o4Rrt!;WJL`8_s~S}o|Q^L|T#MA&wCC{Dj9UaR-z z#|I1*de3X5skK*dTP9BzTgW#?dW*GQ=~r*BU0>hM<=;jz@d|h3pG6PxmI-XyH-mOw zSRbQBdwy1;mLduJHcN7qsb3rNtGZ9MeRHF`g}sPaxuS=a_|cYpXk|T2c=-lJm#)v2 
zeU*xlpFp^pv)(vvNkf-==ZXB2(hcN>0){fv0!GN-DHnyF531GJ$dU37znYAQi&?s+ z7}klO(OxQ(TE2O+K9(Hz4Eegq6+?dtWn57Ba0PvQZ2hU*+r@1yLBjbosF*~;w(x$< zg&)mXB`! zO}6|yoF=^@SiyHFUy^lRe1DU$o%KS256HWJbua(4apZ)6WEiv!G}KOaN4vz|S{61m zFoTYI7xwHl<<%<+%F{LH?`g1&vGT6&7&D!; zv&{sOOHUdO@fG+q?NyrBD`#(P6kT~thCYZ8p=p%OF=`?BpcW+WGIq@`&_6D;-CT8t zZ1FzRX%gZ!{7_nV=WIJ;0zLep;dM5@=rMN*Q{p&Z>8B?9G0U~R zsOK@3@sAZP^Hjp(x58SZ`D~QLt$A0iXCgg@xBZdNuMm144mJnq5Q{LwtA2kmpq;pv zC?-GojFt9hr!xV-7`&X8Eu_L%^SG8~_=1|fZW0j@(?Fp~=}s9|oOWKQugQJ2Q%Qfun^bCI*;|Yl48t$icr@Q78 zk3*hyH97rt20P0C)Hhvg*YiYgeZZW($U0hbfBo+d8Dr~|hKgC+6_GdY>~%|w1#><}Yd8%@8C27Wk$ z=*NAok&#iB-zFZka4H8k?dCG};lPBXyWo;UcsBzs!J;L+r=Q93YD95HM%~)yutIb{ ztS#yF`%69*c*CD>0w?waa%P)rez+l6u+X95`TEB#`zW_1UmVKq2M?+gbiSTDB0@lj4)j+?qo8Wu-bEzta&Iow!K(E?Tmap(xKx-Fw{6*o zS~r;WaP-)>e-XaBX2VBg8AdApExqi*g&UT4+hs*ThV8poEB}9J{iyLJ4afHO!%|Qj zyQr2c{F9zXaN%*GN~A1FoU4nAnw1sTK$dibp28VDSi`v9n~{@~2+VA)fejnp@I4kC zZMk2r_0Jyxw01W5e0hr4MM)F0$WF<+QJ}rl4$O_sqnOP7IbdigFvtHvbN3hQ1nmfl zhRqkDH~HU*J zm)rRexoM)|$ANUn)8kXCS$(IgsF=x@GB&Yk!q)yCFK|yb#?kvQ43m2C<+u(Q!{Y%6 zA?_L_df$Cp;~d7M>f}qhQFXv8ncWeqHZY(b&ioz;#U1feP@ze~I>VSnIteq)L(EOF zBkt>HmaGgN^LE=F>u$d8Sif0@2Z~%6XwL%9oCL#0Ch=AgwqY-e?6i#&V1>D*o*3SaB&^I zaf>gvueYax1oQl2{>O#~=P-s6AMWvNpY0YdRw+fE&>v^KNV{$MN!o60Z8`rH)ztJ< zw;is4v_4T+XXqOVCSkDbyv^|aPS1(KC~d~jNs5<;mNSvnO}T&%#?I%s;Co&FOk()> zLKGrPMj8R43t2=#`hV!P)bZtSQgT?^q>lW@9iYW7<(zK|#^WVhTVA}*4=hx( z1S%Hh>rc|;e?g7C%j*C0i{$R$&x5QP12V2zes(@7rWwn$*pVw-;0vsF5I7)Ql=aEo zq4hX^XHyK?*0`R^x$fpgDm`Bmr?H92qn{A0lSh}r$DOL!z2F}4+;DVJnyN?g4ucYm zef7@Z-Z+>_*M1@Pb8Ee$9o5HVR5e` z@7WMNA}n>C zVMN%eCUp-46CrMY!{c@h(T^zH@UgbT3(J_Byt1G42P^bbLfkdsu{iBmw^b>NFJQ+Q zA;n@xyVa7u<{e#SAA?#-bUVgd7WC9p(y}-^QF9I%h(vUx)R#NSi82(QKd0DCnv=Sw zrN4;tf^p@9Y-?fe5Xn4`gn@yY9B(##U(*R+(yVLOuYZxA|EI2Wd%|ub^*<90cyn^R z;AO!yfnQuT=u21;yUzZ{$)Q#brIr<3-`?6Xu<~tg=urEgUf6m!_{%d+gNbUD88Kr|-F=;AV@Nxqj7-cG9>E z5mgy>wAyAclH(j^L(N&iH%z-@N017Wu&CS)M(JP>GAtPcH0kP<)o@Fyd4tE4k7@3b zg+P#GBtp@h-*dQOb#brA zLhf`>{|IaM3MIz%fi`c7P3G?Q4(vFaxSbr0n)618H9RaLq}#zB?I3632068B2u(kK z?xtYj@qBv~cD(-Pc{-gHjpx+}O9+%X`%+gLl3%_@t_e{p)5o&8|>mHZvX>&-|Bj*qwx&oWRgCK;l zJrTAUS8s3ff3)>G02Y`Z2%u;4ga^1h1>)vCVVacx-0IK9DUN4O*c# zc!{Ziz}@lSrk!@M=<(&Pz#E@FeR|Lho6bKf{;#8lXv5=;?Fmv!v>uy$n3%$R%#3iO zfLJMw#tFmXB9_ux6~42v5~t0Q;ePVVm;o3hePblV4L&ffpu#AH1>D&^v<|{xS0-A$ zsNa5n9Sdq;tKM(#G+9y$!qLEuV3D((>1M3<=%M*J%;ILqJW+_A*tKhL;&%7|ZZDVm zGQy%(mknL4177yEgM)(t{(e2o-%Br>W1OcIz#4KUM z-S5CWZs1#Ic+bAL{oECXIgJ?M5FH;8Pp>^*f>7h#_gB5fyuYO=lGoi5m1h*8we(mX zX4pJC3jKIXKjMn@^1lE1-zBHm2@jiO-^16W1g2sF!vql|U!*!XL*EE9*`(Xb0|8$c z;7fD7@p%{)SocSn^tU+mf6@R@*03>O0M{LQA@(624F0dc&CCT#AO&U8lR8(x_Y?q; zcd=e>FMn=OIPlM%wHYqbn)v8Q$0+hB68JK|gX8K{w)Fl9O7Xl=FkbyITfK|&*Yl;8 zk(Kc{`Y9cR_ok^ozehRh6TBVXsF^E8MenxKZ=WG%$91}wo=Jt_Mlg|K^OAul+E7?K z5qm>iBqo&PhIs8-WT3#R4 zjhg%Pc4E^H{v_Q`O~ns+LLRipoaV|ry_6f$a1M*b?lnpw6~tM=0AtDp*PM0Ib}xVx zO3Bfw`p>IwXC2rNyh?m0`S!`xPr$FjV{cXK3x_+USX&dMQu}T=Op)N_@s1vDqY{1R ztC_G*x#@>HNh~)P-V(!%`LMw5m8=f7%McV4-2FwKMIrFiN3Pm0jC^IoUB?fYTxia2 zM(`0UIrvEx6!0Jb@V5?n%<`FDsipYp+fdlrS3Z0R@;bc(XmZJVA&o#8Q165IkkpoD z8Hu`QVIsEdjcA>3U-CrkY$|8Ep9b{RPw)iA#kuA+zGG)=K#)bqJ4!E zc!$6V)@r1qY-3$tvGZ>S5`UVl2LoxB)2!H=^^0Un{XO`*Unk^w$I4cfVtyMX!DSwAw4a8m{At7xDet0X< z0A``^`t@txQdQP~<`cvYUFgqW*Sh<7XU-I)rl0&>Q2g&mZ%F{a1$C&E+fF7xA3Lns z><<6@m^ud@eYA+%Cp9-W56#TP1Es`L1hsre3XewjP!$o-UKlk$6#@I`6Zy1XlnCQK z^V+rop~nB5Zv1(+til&Y+o}c~^CJL`M%j+$$?f`=~s+=7mL1!NlRB?mRCy)*&*I zB#t$bGeI3M?Gl4)dpnA+}oX1U+ZoHL#OL{*>fvls==^ zrTjN9$m7uGzfaUXK6?Vm8GING(-ZSRR62DS3L#)nEx{P>Zi_@?FMIeY?g1og1y86+ z1!F$GsE#4R=C*IrB2WF62Enoy0ATlARr}7o00`9Fg-#g_RS61O6R%qJ6{SEoB>jw@g7geV=KdN0K$S 
z_m?A`xa%jWi{=fdEH^Rrc)yjT{KJ2|^*?{O`T(R8SV??0(iKPuqfuz%jeq8~4jw*` z%rs&HKR-~f4y7cjFzYh32#!*Trdj}d0fjcf&R?R3Wkz9OmnlV%-o3CTO(|O$%Zq;| za{pKQvz+*&@St) z$j@w;_oMf*-^BVoywmYu+}ruuZ+M}s8OB&_S`E^k1m7->-N#NGh-x`G-J)z7fjds9BwE!egy3()}-uP0$I6zRJ-a;oRcT zrF?}QC-&oxifZ;68L=CK+CK;yLE^P8CxKo`<2+Bw;N8zmZz1;S&`IrcDuQqibg5I% z0@9>E@PXm4^Ud@O%OJpyab6Uoh(t)=R0bp^DO$dP@vtiYmR#VcU=1GAaNw)85f#7_6m>&-T1@* znH;YYq3}{|RQ%Vir@wef%usq+Qek++12!swp_&Kv?LgYUh+m;7?lvb!LPAo4?A#(S zivCY#$FJy*61PDx%gxCVR0573?_%3pyBlxakS7JS(RI#D>*Ik$DsOnQoIKYYpE_!f zp0G`-C5}0}{kVa;XY*#P;9kW;518fMJor>$Pq_jKm4uoGFIL=8A5h-l*BocUZy zX}OBF_W1xc=WVfe3Y#TsRnzTCrFosjUlt-P1afL<4n{~KD98ar;t)z0wNrcDM@XPp zKtW21*=i996+Nx3t8+AYo%#9!l7oT0oP|)pXyp-CC;4c)L0(n1%aVBhebLh)k;H=; za})hi*vK1s&~>e$h;&FMj9_@(!`EJ3YY>H^|3OBA?&=%Y=s8%?wL_dK1altVbaL-l zv^#GAhl<%@l0$Qqc7j`j7dI_DYy_;Yi0zLF}g z4gTEYJY?=$&5z4+#;$R;<4aP}&!X0!CW@WP(*o)(De_+3|1oIIaS>0LTpHLmj{}5B zaAY~`KQasr$S^P(E2^#nAS&=UTsO#)@u8G`y4kVZpP_c}BVJJdEn-ChC(S5k9{=&< zH3qc{UE9W~AQUk_BU!zDyV@ukJ~TBwt!DeXGE9{ob}qK$+n-6CfAPS7260=H*v~Eg zTBb$ZEH!B>7WgDM00u(XrvzWDI8|=4GBf8*qgi3V&eiS!-XBs3?=X37GP;(d?D0aW zKBVcJFUnix*KZ{K|6}dFCkLz(g9{0z*-pI{W#s-80 z5MO^dH#J9pwX!r?TdB(0b!x8%wp{y(v)q?W-z^_8Ok(9o?QH1E9@>~D_bb7rWk2*h zkv7+FG+a#13X-hjIYq`MCx$3!C`bo|_YN-|c(u87z>IR<$jK}pP4qH{UOr@zu~~8d z^ZdfsYN)+x5BUXGm)(}hA9=qmxO=Sb9+li?ALsB&)1%vi2?q!4TsG(dxV=fcF=&7J z+^rHle#Uv4re{SsSMB$!#we$2quzys?$#=U_;b^)ERdsUf)`3@^5dN46QdoIsTw-uJpm*qJPaa-2;?-9~wWIK}ZhDyHbYu+Fc?Ro@n??&U;=T-r) zuObFqmpE)%s@at|a_Qzf`a8+6DMU~Izsv2tTc3RU_RW^mn`tmf&U-(Ui|r_#4h~lu zXYaJFFXIA+sTj$;kn;CwuRg(99hBrV-W-JYVQ&t4udAyIHIGr}W|0)qRi8EcU~js? z*zV3wMEgLes^C`(F_Dq%f`WVOyYuSr@8d6U=oQ)Sm!mkJ4_|!14h@BVVlro-7ev|V z)2D|CHgj5uX|EPn!xR+U(=ut#)zyWtzu~qMQPMp3VE9hjJx*zH(YS2YmMv{jJ<%vN z>$CT)Pp75n8oTV06cZM-{LCO0pP6}=j&gB?U?tMq?C-0tCq+?Gn}178H*wQh3!{;JKNU#A;7-zf`4_ zP436gj8=pE>s2G{6$RHIJr7w!Z=b&8Z(}~*WAT3S+ks>9Qh6h;Gqt0$1etm=8aI|t zEL}rVuE~UM)-B0^wGoxjDaB37O?_sGshV|luZiz(m?SJ>O&O#5S>3fA&6Ei7t%=f4 zxMn!;Z%&Zf`%M^E3SUWEq+0AZEShs{@aiKpmmR!QGT>w$Iwu=Lw&eZvn@Z@7#lzvr z@?Ix5-KgC+kDOK7Ja%yNbNL-IhTXATHHC5O^n*0&rE$C{a&~{BKmWm)5Tmv}EK@i2 zQp^WE``AqXbM3zfz=}CW=u!2@k+I9k-KJ=d-M33SSvCv0r`tl5T>AR>Lz8W1sD*{7 z&skN~dnUIBo?~KT4}|J($vW~{abd^WrX;92#4;&d*h>V zSAlMDPU>r*3fq>#;#Q+;OIxGw=vc(;oaY`_jZG|#r0b^XrX3+h$mGKWeMYgfbCO_k zMGtxSnqr8)f~Fy`U5iq2AtSwg8p*MnX?eW@8qnwl2e z<@ig=gK;FyV~nHyXwAEX(U1gbkBvl=076C6z(kO}!gyE9#f_}(;?7btHY@7Jj)yTp zw&Mjc)011<8>@O=Uh$P@-x~%p87FE8)SjWh4O39sA3q2sLIp4!W1x$3M)wxkgu&${HUn4hRX(})--6(&S{zPGo?Bg;7y0~VuVnV={}#+Hh(fQ8)XaWPJ$m%$ z%+MX4is9t}{E8%VqWEJxRGdadM%qFVA|C)LXb|FBtcO97qt(ruqK{sHw#v0c*vlk- zOQx5Vw;X;S&%t~mVn3|+*tPEx5MdNwbF1VwW+FhMnea3 z9;s(lSUqkyeOkE=6VzTE(*0}NL;*Go?A3&61D5s6$au*R?apz}lm0{mE_RN}-mWel z`e8}_xrC^EkLjMruU<(%d-~LNxb7aD@eU{g6}vvJ^X%EP-uV+}vSKArPIeIDiI?$kic}f<@p~y4O+?6i`u=&8t*^xi?`L1TuCVRUQPo(r zKFvkAbodu*do;D7A%bAk*AqqLI{?Afd5urzVRf20=3FJXFUU@B@SMEc5o zCO(wqI46CoNwHm2o@r(Fn?N%Aw+|RERQZ%sQc`}xVX?qe-okA6PZALm&`?E|zC6{P zlb-%IuSVN{0LOY(_}BMp(*cnVG0x6W1ejf0*QOj-IP;^_W~&#)spzl39&(v!r{MJW z-vt|f&{mm94JP8xBSNx+q!!%p34;#52d=BLCU85lM_;*XhWy4j*Fgop&-?KkQ~CB# zMC>&ZA{hxK*QS+T>%tt_FuOUlhW~&MJ=BUO=&!YGh8hK6-`pgzAn}{)Q+@O6*RR7Q z2kr$jle4!YRQ-bfHu2ZAh)^^nEeQF1jI>4N3yB0FqN^^5HdCnDi z%$aU)&tXf6`yjjdu%x6Rbg_w-_kBg(QMEQ4ERugv0#=opL6V)5!$!KMd3OMISTp|e zt}V!Rnwl7rV*9-#bvm$ryI(L;24LVf^-pgUD^!#c=D1H*NSjDTaQ~tjy&x`MgZGAmuL^oT?5>uBfXA^3C(ZBbMZD z(BFazooodBFRFKSP!HHmfg~tsCljU?LH=1Q>t`=^m!TSx3Q7V*hi24zApgcNMc&uJ zb43Wd9=y1;6crx+7%aF(_-p{{zGDn~PNBAV>YPzqw{PF(J9?D6df-V%;_hFHwTB8b z7*=_lixYz!N2OUo*AxHk5jI0dc9*lcW=UU4yTe1M)DM3H-221 zfjaGd`}Rq@O-9BiBp?zKPGd8k1>#nwoWRHR}e{AvS5@+DgGOqiCxMD!Jozkc{C 
z2F@SZ528J){tEt@R%$y=!%21E%9oe708Ta}P?7Cn(SgjU=J%Vf zqs<@SB4Dx0jTmn;kfa0j>0W4z%js(LT26NMw4{G5hH#65o|SY>T}>@9D?<7AW~5Fb zeC7036r(u!4jxk+YPoj&Gy<_qKtv6Q3x)c%~hC) z{>4#i2uERQGKm7%Dd7B_m5ig>`D5%DGuYjQWt#Wn06#iZM+0`8zugFdiF!vkst9=R zBzfNpot>Q*D!q!SeFF`8*o<0*GMA5~o-y)Fl z-a9HgxLt9O7Umu1?@|<@sHYc^XNJo|ZqfMuJ_-^+3k1!ool)Jry( zFCXUMc>K||99f@in4*bElAXQ%OqER=Ysf(v19~2fcF2m?KX>kjY{s|azjtHsE+|?R zCM8C4G$=x>BGWsuol_tpCIbS=wBI6%gi}eRd6{y@F4n2RC!RmuW@$h>%nso4AwSRV z%PTjLOMUq``~|4Dl+d|oZ2X*rYQJr@Ke0&8d_p%pGwe+J!GMhoFOxEtp&@zOcJ}Yz zz9GVw>-2;1)~p+XzqhUqVrwaC65 z8Yt~3Ot!D)jTGTs1TP{V9s5f(Ay>xW;>850x)!%-Wr5B@SJvi-CI@ghI(qs*;Y-#) zwX5`H9&^dfrPz~x@xg)3UvmtFDjIT+k^V+H0ZzbZa}pf9Am#P|*UOlLP~AZfc7MfE zL`npQ@e%|S{ZnG#BFz7+4Ov8%+N4u12T)T}i{w}&%;O>UlBOOb*oz_bJL7WegBk3> zqIlodW@u+;mzfkRiz2^nmI_5FJf9hQTW%{^Yins4oIQJXm27bL?=Qwk0*hRIVq=a* z9f37JbhJ9V<0oN4#t47kmU9WD8>BH-j|1HGzZrc%K!jXBd(v?_C``^niv2UeRSL6- z6}CJF8<(Y*@&492r1|FFx^;_PGpYQ4AB8Vbq(PXQ8xgm|lrwKecLkS}NF0>+IS)k}pFk4m>*+m#QfiP?T17T( z23M|RRVF1T3ogB~_*+XzBYHFQ%^YwOV#Iyf_ig>ZmHm7|4i7DbpSuchkH-(sKYWF( zfdg52;T;dxR}jZ8<-jBTPtwwq0lP9uxulM^Cgm(HUISB419pIxqNxDOD$N8D~FMArh29YN1c6hOt$EAQ335{F)kU0W)lEFXaJ99^)` zRzRZB?A*Dh06OBj_ag`Oza7KS!kWSd!F>L43)nbW=Nrq;VR#WFxYcxjR)S5$ylP~@xWIPeWGYkX{^GW34_J4VM|;Qylzx98%idv&Ra3!`>C1QJ zoFJti_w`*sWC65;c!ecYnYiIzzc$9=mr|m@)I700x?GPa=Ii>^K4eKM2Qx}iu5Ye5esOd-I~C@rrmodJAEUzOT|Jnp5piP& z^$`7#EKn6ko6LFt47wQAYs_zs; zJbL71C)G-i34k6alTcpR_xeT=7$xV-%<^SCW;4sm&RbboF)8@H2E<=y&&y72!$e-E zQ|+T@=e}eU(c6-uXDvA}`9{*au7Gy;?VqfyxEBP23UQPkC&mH@#Ni~b2a0V6QN7N? zd}#vj{J4%@JJIKv9`f^JKmSv`1Urgl6;J-y(~}8JTPjdEig`9UuTP{Kyc)5ojH1?= znVDz6v^DhddIi#z7&*GhM%6w~R6vha9&R)~5E&h9y$YPb$Hg@g*H# zQ{-zPKnKQVb!B$40sCMyjF)Z<*^8ssd}mnPb{sqm87$+fB2D!}IK~?b4Q#~#?;Ssv z>3XkBe}EibanE`45LUShkTu>8?l>z{snF|rWg_e5y%>AE62XuAN?noZ~ip+TI@!no2)ADmozR#wmCH>X9# z#l(8d319&K`071(1-e8r0ltRoGPhEWiGrMuC1;WM=^^`H%52x6EM{e1SKX*Qjai#C+@{mGvj}(7%;XMEYn06 z*%nGM35iLtK-`zN^>Yj+zP*dc&d=|Dsv03?-z8AF+;veT#~N;(KP)UvD~^!OkOUwu z)DQHPU44SIHiuZC{>a-9y&E^mE02vEYF88gVY}zRRyG$;T>2xRb;Wj&hMaxyDU9>> z51Z(s*DCE9Np?^bqX2BTP()MHhzK{1MCilTQ{bqn8L~3_B?lU8*~9(Ad`FJF$gy6X z>{Oqam{5upaRsU!recxxY$t_Gutjy|!M3)x(m7rThj)YgPm`6-_P6fZbK5^E{#>O5P%@cvwI6iaK)@T|#n2|WY%F`iC%g4Z2((d`q`qc=E#;=6F zr*+W<860f#m}iH$z$jL0!?@o{OK~>H;4a!fo@8Vrt1rRO9?6C$9Zp|fJ)7#zw+9fr z0ljZHT>GE5B3Dl^4+$fR)q2x7_}lHn!B&o3u3?%1gL&p?ipgiv0040MUpb#*Q3$}`Eq zZcmws@GpYhtfZH&@j+(p^^+?f)2~c^%2LzNIH#)OeIWPziyBkRr*&H=gnYnD{@U;xzLWclIR~o{LFRq(_HPdRJuZZdPek^+(G7Z3 zD4g5j2=Lc~p(MMY()w5$vA({(m-gL)i1dQyKqdUPGzX>6K@A}vKmSKC*CHQ1O8fTh zvh$eg5YWmuAirv3@{&ImptnS|g4YjQ=0V9h?teUKD&NV=z;Yd^&(op*o3}GyrV5** z7dp{vCR60gJJi--0SpBtj9j_`tz7lMoS^pD4@ofd0%e$IjNlf1UERls zEO;Bj44AnCU_`Oc`n3usLZD$jr<`<93t_E zEcQWvSSDG#BE?^OrKkwJM)oxPYBDJ?UP{t`Xa^3PhkHYD+F!2WxUsnXxSQH?l^SjB_S(#`{Fz>#UDo&5W#(ZhJM*!u+$ig}r@?C*C#<7xz2 z3%b=3E?l^H(MtI^ge`US^`n3|=im-La1KsAi!tpWWAjOc&VFWKSp%Q{z}7SaoxF?( z6P&AbVV|2PH<9vx(;MISfy#_0!iK54D*u{+g%F&umC*-x6(1rnxV)m7`ae+(Pj_>H ztUX?2ZerlunGsmp5exXOJDG;{;%OwBSt$mc`d+G9OY+Zx%pTXqZbabcgA|~6OC9Q8 zIif8l$!{5^f}_qF%B)`w^gzClgV5H!yNR+l&Z>m5>btoWm%xpDk52^Ifr`FRU>*Rt zo%wjPUvN-4CKGVvOMeKnl@eyS&KnyoUSeT2mC94cqW-So{+N%7f){;f2NR_f1X9 z&;$bf_Cjbn4e0~hmMXkFnE)WyS}}%mJlC5eQDGp1uhdspd1oOLOgw-*7axXuP_@EWfcblLKy2Icb*vF$f1tevf$ISqwQaFP=y&92{Lc z6+Tzj}^PZ|1UYZ!A;M91{JdovKg3CaO zJQn+#yG3Cr$Lbzo;r2gM0GPKzp4Y;28ug< zmO2V;Pr*N{ci}=j?EdyIjAFAN7GMv~5brvGg3I8~>DOOxIVCq*$#w04QP#lomuyCk z3dfK4$~%#YUfM={=o1~!5WS7Fv9^|XJ;-wRH+5e?EP>72_4G;hMiAJ6pG`&)i{U@w z*BJn8cBl-LW+KL?x<}vmtr?JaFeET}UB&N_2Gu@|my|cb4c89jn{lcs4kV@SKrVm0 zO~t?mu~;xW?{fGn-bHgVti`Sd&GeV%yn59)3;mcRGMx`MZw4K2-4erU#kJg8b5zXO 
zmS6IQ`rvD>uKK2?PP?{v+Ec>AFNJ-_{+`xgI>IcqbYoclyL=Yvm7cks(FXf=h!ijyYLaSdgEL1^@g>G~z)6AH#d}8Yq)2Fs-s* zyjU z^U0S91uS65fYaO^`ru&oeJ1w;e;F$AzBQZ`sv z5p{>aHAzH@A$BnPVT4Qn_IUy?)|tUXw!AU7C_#ol4sIsiA?6MsP7P?860|-*z-K@} zVq&RHp&R_xkKyRpx2GyyC~=6}FKC=OJ?&s+ZGFf1-UJs$FaX>{6HuInn5MqYa2--6 z68WZ|_U^N;+4SA`@u6a_D(BH7IwYI{J;1fSDZ~DNCN7vFG&izh$6$clh_4*_2rJmJ z!z|!am{X)j>7f3Pq{HyTCtf_-vnnO&{57(du+fd1Od)J#Ks3P0)SKVnIpR-=Oi9TH zBp$?kua^rWLR{2QMX@jg1r1QEju?h+(4T%A`u}Q29-Cw4d9C0wsCd+@${6Hxm%32p zWg(Q;aD?Zd2_=FbWKM(sFgidHBrMHI<~a1I`e!P8yzO-N|ICIeNSP8p3tkcrw z;AlAbN`;F0RuJ!BoBXB*QcD57HN_nJ#46g7Xcj)kACnRGK-U~HmnV0G4(Oni zp=Ej=5*i2h#prg8>`Gb7RvhBC5Ly!^#zUL#%@9_ePi=ps(JZ(sJ#Z@L?*4~K|56Z$ zD-$4S)j1WoRY^;CXI{h}M0cc8a2$E#GyVcF6Bwj@Q6;$bQRa{X!peYGHsC(+78e)A04#$a-5$buYMPq* z*RCy&>=~2)y~Qrz-o;r`KW0J8f^40;vFixi&fUkjoPqHZE%Rl2L2F2cb?ngBcZ)&! zoevK3ehrAKaT>fj?)5WJ8d>4BGUvA{gQx;%qqX8*78NUp8lo%nSuX^5W+^l;JkY*S zOQQgd9VZCSYn<_~0Rt!1y7Q~etj!-(wF)AsVt|Y9WHllU<=uiW?3e;< za49nWh0QFRt9nj7BXdU*$sgxACQ1ifHE=7-IC7LAUVnXi!>#z`OIzfAjiX09%dU<@ zCM57Ly}EpYD8PmM^D8%XNvF>`pxfl}YnCRjuD`321do9m>KNeaV87<>1PB8VJ zDb{Lm`QvT-ydMbYj?LUdJOBbfAsn|E!^b!Q@A6 zh0c5Qyw{<@EMykCi>PXA8$#k_u|+*4;%J8em=|2JEOW4y`p>3cgiY<=ek_gl0dMKt z#fD6Vzc<|w#w#ipvPAw5#qT11bSX*p%e4vBp%haHhy(-%W%=IMq1#Fb%ki@yvpT`I zDh@r<<5KgCt|Wk(xhZ6B&?k9mJ3E^%7m&J}o7)D23om~al6C@cq6ZEi`L)4a+xS(S zA=JV_JoCc>#_x|utO0iTC(f?^1x+4Y`grDm!V@9(aL=0@)&V%NGm{` zLkDHtdnzIEAfgc(5zb_9&fy4EPC-vE9}|b_e$(_ba(ev2GBLy{$mV}KYWL6P z|6&fGtWYAHP*R%Kj?MoqVU-PTuFsEbtuIOYZoGs>v{}%ku2+BkV@C(#Du5i0%+ypx zp6B+aDPUY;_7E>QvW(UwzkjcmYf@nVj!TIiwzzHk{K|9+dLF1I)POmMSFGRrjgMGC zudHSoMc(%(13@TfOUJ##1q!k$8N|-&Q{B)Q=!LUDs>`}Vt_s3OJ+hdGGV6WTu8y@H zG{O0qq+s#o)RXaw1VBd+(|S7bF{A&>EjP#vnPJ_-XnjwyjAU|Z$_?6JdtYj6V8cF# ztAaK|4xX;xGSW9TcJNqY6~3|f&BapK`xhZ(k+X4SN?2Y-Z6SmSHzFgN< zkh@Yk7N=mBY_r-MXX9aWJ~s5@4>8S`6mQL4s-?(!qoaz>aqE9wl{w2eRKjf8boTaY zjoUNtHn}m6yzJSm4c~#R#;b1*3S?*{)uiBm@E(d#z3%45L3giuWFa*zZFrB`M?p?@ z)Xu+p#p=A(qFw=!xZQO3(AL)0?L9r?Z_yuY_X6+Fulv-u@9liiNZzv!WnB3~fQTEw zlyU)bIrJ8ifLU7yFv?eUrN`3Am}xg@@N=u|Jo3BMNtoEg#JaULDf}t{Kd?52#oIgx z4dvV)Tt4?~dNV6!d)j90ownc2mIO)5+B?)9*T`qM{!FW&8Qn1<=C|HtHr!JBwXVu$ zW$N82{(vy{itSXd!ny(>BVz3+Ki0~M+tg>_lzJ6#-1Npzq~IwCGq`QZnBJ$5QzvVl8TDtb7RX%B2W21Ugv~oNFXT!# zKL6KDkhv-&FX`9_TI#_YTs{Xcpie6P_jfGX&s!?aQ2B5L_M5eIDm0C*l0{LCV%Q-dVXf@}&-)oPQ30jt{bJW+_>3 zIdvM5$7^$$PXC9T#>j{vcAuRi_dJ7LTc+t>Z|L|V!{!-~S0X@#d-p1chRwYuPj!oD z+QZFl$QeKCJioQn_N2U=5!lY^^y!v;0gH#)fj?$vop|SPz^__gLBYu+d(R%1-Tv|M z(Q4DuOP!okKX**+7VH(>#2N7uXiDS}H9@X_;t!qw?5UbR^3r%6%JwF_F^<+(`s2f&B9@G%9y7osmHaNyhz4GrzMt#j*W zkg&!cv^r-?T`E-I zP+V0cF0{~}d#FcwDt9G2{X?#1(!`R<5&~fj!q*;x6y! zP_X$ibw04mwtw-Qn#w+6TFl>nht*Ikn*qKaZjc@I8YDZ?w>Uwm>X$@(wAtbX1-yWI zkGx;W>QFzSh$3{jbm{(m6b_O1geCC|T8l6qqNCu|l8}%{%gLeopWfj9U?866LeaBs zQ&aWsl{0Qz0^C>QB8!relC@Rj>kZf~71`xadgP>m(rhuA%rokI7iYHLR6Q^3+_*9u z9af~sGd6S~-~s)5xg6oi9{zjSdmEXb_E@HvrDX{oXdENbbT5)k_ z?s|3jS$kZc>l{Uocl<@q>|XDvpi)!($dZ>Q)!O1}rRl2C$oJgb8*d0-yLD6$dIWUv zns+~vsBKMfuDs{lHvQ&A*j#djdt$}z!@fjzk3o&4(LJojWlHJC{-p^J+KU72t-fpr zcX?851>ZSKa&IRODNfJ>hiv#7?>_oc=%m2Z3MAkxz@3eQG}yJkisDz0^F@hUs~7rt z1fM-SWkemFc|!A%*0Zy*NlEok2(>KE8pgqzaGG>RNcKzp{cAwyWjod3w_& zkrg_9!=NJ^sQu&?>SoSDUil`j*d;gU8=sx6Y-JVYeKe-1NHduHvpg}w!@BB+0t(Ku zdza;QA6PKBJT%6%XiYoHWbqo>*o@pTm}o5pWARq)B?ejRIr|I0wshSJ|G@_r4-hc}2Nh=IR1{ITRy1uaFZ-pF88_jSJ;oFS%N|7pl1?Tf zhwR!S%_=qao}Hqw}>?W9zu}u%xiN zpyl##m)5f=YT~8`S}*##q}pl^v`J#w6$7NvDFX_UyU5zaP0V6i7++*k7snkGSbLII zBO2q&HgP;)AR@gwMX878-0iK*><6jo*>rB5wC7GKXC`7>Z}}xqoMv2SD86_OI01Az z?5_2}ywUsBay;fEcUT%8OFS`ikPg*&q!fog_DZ@dRlLtVEG%#Mm$=+{Zq)7pY0pl! 
ze76rqx_$?yedW7-wxkxk`7whBE2ua4FfX2O z#h!j9c{`q{y*#a+BwSollB+tE&gvU3!n9o;S!%UWLdO-4T*Bx@XNv(*@A>wT3X{QP zK~^_+IEKjgOP(3T(O>IR`AfSXBp*DDT4BEVmy9EVsLF%ZR;|}RI@EiLY-o@)=iXsw zbVh?22e3K6)Q?nS@*ZYeKkIjs3?l_?sn|4(hDUjRdZ+LFFe=MIS#AvP_6!&@VPhw zPSv5TFLTj~cy-#QR|H6L9+@c}cnfC7gQ_a|*q4OCK$_vr0fM~yOy6xVm|{rQox+S- zH<_I``?b%$Eq$=5_feR4nr3&%u(1bC9`$a^YhI);9=+6~hTgRiCMCk4@t~>fzJ6oEz zvJV$67>CBKyNZ%O<-s&+aGecT_I5p3;$R#YbyU}SjHZ0|c4wzJ`EBKSS9kJ?I986!kpso)vWcE%(m)YrmO7%5&HMYWl;F@?!IG&+Hl}E5^Wg$Id`ZQ&WHH*3m+E4( zS$Zc^Yq$VYdY*Hul?oc zSkAnqeP6GuPpk5>5aqb6^n7a4+@;rAOtQ4Bjf;8e3gBI2@m?spHC%Fd39=erl>0VM z0Ac0E74?6$JSN~GeZj!)$N)l@&yV*rAhxjHi7)kO=^Lr6Cp4I@P_9$t6}=O)raMws z_BMi1KE2MFpRMiCqt|USW$7mS@zXw2GYv*fd#}X32tIH(TMz_~0DAP6cvXtj)ew|| zl}}jrE?jOE<*l23w*r;ElEy}b^B2trZJFNMC7x zy|jlJU|X7=Wprx&yAfrph^q8{``raivGu*Z$*Sk=7EMa@6$K@Z0g+Q$qM~D;uEQ1d z9}k3a3ri%_dlsTbuF!O(?wwk$9icj#Nu73+F`s`rx^001|1iGI>*)H^-P*k3M(F+F z{#G`L{DLu}aSDMB((e+`4^_+iFTb}YoJs3QI29!s)Tg+=_zmSJLlPa8H&4>jDLeJ` z$Lj-@{={?m9u#p8$UA&4`#=(N9+MQE4;X6S{!~$M@enG*Jl&qT+S^qW#*FcAziBYN zh$QPZr^aNS4z=WEmMnk1v1FOC6+1l@_Vg$spi}pf>kOA#yCm9(D3EzIUVT2MD$0em zpi?@e^DaNOk14yoIE{kYu-Lq*FDf%XcgVpZyu3Q&Y;~#xV%}GC0?5KOD(WYaM;=Ge zm>(*0{U3?LS#TE!FT~j_BB+MKGJ5!BiOUJyjjgsEuCf|VOwu&J8OOfts#t8aJ*ud=D|V3SvaN0z=ZSEIdhx*chh*ZH*zEXrWrm=2_w7l& zAGc9Egb`g?+71p-i7wZrBNV#l>=8s+DJi0`M%P_+C9XL*>_#p-{p!1F@*3`*NBq$d zD{Nll99x5A3e^xphTwaPLqkBC!@ybnzC3OLB5AD~SeMHzz9%fDyse1FyzjJ&##hf@ z-+x4#=(rg=fUmWT_Bnct&$Tn5+we$1tBQc2{xdfggiR0cMJ2jjo;`SVc0jD*#zfo^ z>u)KSt?@@b-IlY>4L0}n-c|D4ss4WYg_Bc!y0s6sRv4`;rGG*@c9a>wV*)qT7kUsw z-oX2F$&c0GEC2b6fuRwLhp)( z;kiqs98Zr-*&IFbG`H$uh1na2aMn^0=8WwgJ0q_0)~KxKnI19~;w@J=|ZZ-ur~~ zvCG=4YN2r4UhM0t03U^}6U(afZur#{4 zL2kQbG@cukTl?7?#b{lq#S?cGYk?Zk`*t7xUzkH`ABlJ;euD!8Sm%K;j2|6*U_|x?NLi}va>uXSy%VmZ8>C-7GyQnW_`9% zSVp0vX#S}M`DKgDl?Z=w7qD^-~FyF(A^ADx1$$ zm^}8gsJwhABI~hRVT7T=d0sk-=f>r91Dju05kbSdR9aFZ_;yGwZ};Y-D_&kq5qf%3 z*m4?%ygh?8)q7tBS;U^XI$F5fhQG*RGx0^U^^qc?@p6A9tA3&K2qkJdV%qcPQ0w*? 
zEW2KczB77ieLVdHT~w981%B7_x>0Y^^9&q}N-2-F=t7^g1-a;Z+%FN);tx)s?&Ao~ zMywYhKCOK`|6&;{P2{wC5Yog=QG~R|=aib1*o87LL^pcMj^0XwAaSaPE$T%1X^k6` z0p{YE%=k9CiCwbtl$+5eSowaun;Z7{g;YyqkKyXQ=6a0q^G2&Nn`l4E?bK%Nsa^Zx z7(E~9&DVSBCM7AVykz1}m}>w0`F?!-1y5g}{GtcSlqX+_n^yt?b5TJr(#4Z#$Fn#Z zwJ|d@e}KC?ZN=_GRpQzj>(f){=y(8|kB|;Jgsl`pAfz~bvHG?uqQ1oyCOeGA+obsJ zNAe?5`!%H-*4#WiMuwj`v6M?aXsc*A7DdGs{k_>LK<(S>=GV?6hk2v;bmn=;mm?7p z`A`#w&a7=mk=Pqv?r46mKxJmf^0n?jmpDnmGw}xp`%8ucl4MMDR=W45#~^l4ssQ<6 z=#+H5i0KGQ{WQ1~9zE^~_?K-{zfpX{Cdm74h(X9o9PVZ%0>{;98yj9zuL*8RnJob9 z<1U?%vQ*;pt>B8oxw`ii5s}B1Daa&LU-uymd_sTVKvGS+RMA|U&iHC6JEQF_A!BN1 z_7v_8kyu@ccQ)zKJr{^7Gx=ZvpW~}vv)D*#I89lTzi8e%-qhUoeCyqTW1V4&c6&7w zjh|+{4<7AP6Z>TFIQmJF{h4vQzUsSQnfl3u~?E38uU z+-k0xqiUWR%2SVY!xGDXLHsoV{aoe)r&@5n)<@=T97~Gp_KJvF+RndiXVMr9IBGwi zo3#3*Jc~ADxmfV<+1@p0n*0)A_QRSWhSiJAI{9eN3&LYe@as_ie|h&A*z4Gd<%w%A z0R>;v=@y=gS2Sf&e|Gi&3+w5D!NaY~T7!L3SfX4zV$MV-*>hCd?APSfza^5&tj?Jj zDw6ypfe3(vQL@MdoA9=0_V`1V`D`=^kMuyVXiY%5SSIA>10U9e73j5HD|9ZZ0*%X`gk@ zSJ;MF-H~#e6bm&?t{XGVI5w4k>b+TmQ8UUoE+8d%d&d43C_XQXTKQfTGFF&{7Wo)> zM*D}wiHrM6@|~(fk21g5uF@oT6<{E5Ceu0k^|AgVgn{=`18O++Q9nDAd@l*|_(3!K z>A#MiKO-b&suWu!1E;bFL(l?KRTP$Bh&|BOdSc^AS!sE7cOBocWXc-Dk0>*%T|UjP z=9bsT+zr#WnT00-A}Z?A$1;q2S~@$IYdkVy=Jk)$ML*)o%!^fW)%5ROtF^uI+>LR) zcBovV)}1)(r^9;o4&s4%oZ%aDv%0x?H{^4S9>Nc7jY?{LD;3y+7~0wp4fZZ%26R{r zIbXSQ#QTV^g$1g_Z>9dYkNYC0pth%rTOlg>jGXKclc%gH=}B)BqPnv`l0v~qV@G{I z$&S^G8WFI6*CRz2QER(9z5PPmn-c1Sp5~Se1|_FIbbTJT4p~_D`F=~q@u%|;@$hJ& zfh+659fTFWk}dGBmOt!_fZqxNL(VeY8bk2Y_eLV9m1o=2(*k)bKZflFFt0LRkEc8< zGyAgJ*^2&kz_HhO^WG*Y^)3$Shj5}q=#Ryv#4#W7jcrU(*GRK)-(9svH0eX}+%fB< zx2s&*2kVoZiO}B8JMSdkYF6T)Df)h_qN#))DV}q-jxPUI2!q9o1CVSNh9sLJ4po8L z*GUx+JH3V?tX8ecIrJ0^ZH!sm<0`jW5;QLJcO7dTYVTF@xpwKmggJfpY0oWU&r#92 zfonFEzl_!bTXZ2~>)|T_y&t*GXj7YO>S37ULa<|9ZpM->U9oDC$roSl%4Yuwq3B@g z!B3x@)1*E8-{U_h=r9U-DK+AL>iwVpo~u8OT1mG_>lyEnx}(DDVinuFszQIDNuOgj zOQ|59@f~M0cZUjXLv2!|)rGN8;G%L&Y*S-OlU2B88)rf+sYNZh4Np6ZmgTA30oAP# zw@z{uK9iNxU|(Ew%1q)cSVL5oyk1mU8G2e2YFHnamNNWw!$S0nMqvw6TU$FmK3)xK z;3g7|LvGXM;~$e}U??TMZPxeWhuF^(`r{OfoErthX_2yH@z$f;q@K0K?_Zm(nPeh9 z@klyis=`s5nC+Mcnw+Zla9>lD>5CSe&2T80xDcGH6_vC>$>MhJNY`1rs=lAz0xaVG z!ktvf2Y;!L9#8~#MA%X2UjfDG!Vg2vr%tzF>5+FRjyhI2pr_S~uS?%P$$2mMi=oSe z8k^Un&I8S#s?84G#uc)fvK5{5+J63|Zrf_=@WJz-ZivXg#g+?GKeZ;d9$tOBp)(h9 z$x+Xr4tq2{;bKyq^IT5JnQub=f_nQ-rZyr$pTa%-A1C|9`XTlJ<+gWdnVhZ z!ucR&4&GV9%m6-@ejTcB66?&IVxffoK7>kGq(%J$Gd6`mCe;|WHE$|ef2E-fdVB7> zyH;i;oHPXPfh~qJ_PQ{$_a8VN;H?d+v(myQG4%8&SWsf1(PrGT`6ujS&HupBYs2yzbnOOkwRc; zD_cKjemGcY7>Z~bAi4R&r%!c^ulYV-h@{>ojnd+YK%CiEt-lhc>x>c)YP=F>$UAY$ z_R^>?|GANDXMcl)pXlcceF3|DQ(c?5s7#B))`;zVOPcVpV3oOxw^e4RlwH-S;?q7< zC`A9XD*`wXvYe4t>)gBJ@m~sYvcpT{(plu$hkyQ?m}6^}-utM@>gviCcVeX{V~h>w zxaG>Wi!ZaXxUTh=RoxQd8)gvtL^OL0S_7Y2m!g_xVZAr`U6bu%{fm<0H}`#z=Jma$ zJXH8_AMSFRHxMyc3AVvP=NzND5__$=L`0`{lJsq;yl0$yy%gFcZKkIe$eZkmg1TQ< zdleP3*QU-!K0I0)8sHR_%j7<7iHh z+i=_JqiX2)vuEJqkC}R|DW(FO6I>;qJ=i21d#^Q_EdS(~NG9NzXI)S1!pH@L;IXO6 zOoicJdYt71dPI7;uJ>^UVAp%58cUU`t27cUqMP4k9bv)AJ`lzN_4>{gACL9K;|AUSHi!w%ZCSF&{XHl5dAZ3 zFz61a2zOAeFC9Z(w)<`5T0SVPZ`<3Ot0R@GUu*X^{ETfq36sAMIad`(`y`FY)y?hO zIy2s9$t=TLtgk+}XQGo^KH#@97nw5649|74mO zsEfFCyvnBkyr8i7{ck^x8Qh_>4LejY_aUigseR8$PTJP&{Z(J+ij#gS`lE<;?{~{C zBj0-n9!YwSX8(k+$ol^L?d3*Z>UAVByaI1@b7!|JqgJf?^eH6`^OuJ|zG2Y{IhQK` zigG!G|8VRv_4I}&HZks4fy?PX9rE-0P(Ey@+y&{ef)ckr_i&4mMfAKZB;`+F#)O=v zB}cCrWUqGQyL{XQD)b^)%~*9Yug8iM8d_SzP-Nyw*j`;gk^uk)Bcmi?)}q(I10 z<*A?Jpgn8{kHN1t&)xVx`u#BSQnt+f5)pW#WJ;}Kc!m1{XZWCWuxO0e)s^=|Ualbr z@@!99xp8G@+pV8S7nzG$F-q+MNiVP`0C;7$NReKn#>!)9(K}u30@6=9&zHWU@6?Z) z_mmBGm5KuWptA@%E5?T*#(0;bv9N^19WIqnsQ?0`GabvqE>CtsuLg$s9?P&xaID~t 
zUV&cWw~%6TCn#v)>j${tWTYszQJ-rzKS&aDN8vztwHYXLrbFZmo33_HH`7ePm0oMQ<;aI$V7y1PPunTe7brpxq%~E^q9=%&zI}mDmpl(6^ z$-IQKztF4DxK-czYx;K@E6}-Lz&Lsz?9V-{%^RMSbjRiu4gTKJxE-Zsw=Xls@p({6 zhYAU|pVhEX{zv89eE~8Y=G|9%9rj0m=&u_GA1bdr1oX>|7V)Iv;$jNb&+E%H*;gVh;seM7;g{B3@PP z6c0i|$_0MRRc#950mM*4;p$bRU^ee*;jLi?Ts8F1xH~#J`l#u$L&{V>o762sEMdnLkqG4^1W-f?pprer-534tMfr!xF`KPD~r_Aipmsj6H9#!MMHM&*ja~+eA4-Nky zoH&_O^Uhv4wPe>m@tOIJUCnnUV$N^45iP$$zQTt(z=7x9bTZe5-JTA5JPA-31!1jDDVH}{j0k`@q(iq>j_c!futhu5;{_puo2d%mVB`D8+S zUK+JuQiMS$zJm3Hh3j2O##g?P| zG>;^zc(3N?{QmWn=BBvFK8W8Sp-BHlmAsn3XKYo-6=+pi%h$P@h1#;5%vSlK)WiDj3XMb|!r`Q2Js zS$RZ!6=KIe`1LWV>+a-ii3d-eTBf}9#6&By^?M=rM=kNEp5#4uzvi+G^kWnhY9u9l zKfBS)30&_PD|e8t-$%rw>iaUY)MQ9Sn1FR1sr3nnm<-aa^Yu5!ru0$Z6ym@erAu1^WN%W*VFV ztU{1h_S*jQ{%A6N0b7+`me`}Ly;>%dwfCsHkIjZy)C}A=e|pD$c}r-M3R{R9>+B## zzAN=4MeIV|bjS*VwnJkIxU+x@tDNc@2!y!u|8Pm@7+gu z4PpAzj~jC2*SvXK+frWfRAz`6IbogKEDg=NKDKp^S()6Bl1c9r7y21|)ir3BA}q1?*6Q)nkO|2Kxc4y09e=$pM)WM5aYR&; z@wS+i>oh@AhQVXa6n*EJe`$R`3VCU!ye7L8-k1z4d>*oW%+V~q{{NBn)=^Q0UHd3f zQi^m6NP~2DcStu9QYsBn62nNhG)RkxN=Zu((k-pn` z2iDBMv+sNFYhTy3U7Q7;kmAA0t1Bl6}TyTp)vVl4r#k$UAM;L!Lbj=e(1 zFZ3OIDvp|)tRt44ucV@dc7MJ~QP)qjOtmZ6vy)En-`dQ}GGK$@5eE*FkyR`y)G8@8 zE$s)VbwHwl0i>GHb%3h;!XFCY_y6HA0-%5rVDLitx^3BkcODg|f>~c*F9DwnDl7+cGKN34o?5_R$8BQZ(WfF|<-!NwiA&L8E=;evm17?5= z3LKU%+nnkxvWHe!*Z+5KShb*~U7@zZGC>0(bOb07AKj0*ssY`((o6dj0YC=d8HzXz zj&)B%4upXp>RVaue^{hE`J-(*1C;Z?jt*b1B@Dd#L!)Cn!)(<-O3{fiVA>~L4@rA4 z1Ewqp*SyjBk|I!c_uB7CMc#9gDtsxywP;J>jH{aDaaeC7mrk$Y5rX~TA=h|-nA*?U zMgpLtR~C>$w#wl=^_L-ja|0rhM*goV#ER0rvbV0zTDj8GRTr|Tn*$`ZRlj(989uWtRG`WwBvkHBK{dp7D^p;3owvB3RuG|Zste{Xfg z)c1G#ZJxJFX~X#2<+$2t$MBI9fB(@zwkzhn-i}5HJIEQ&)hOt0#@B6@;6j5jcl<|( z_Rq2g+CA(IQA+D9=smy5(6tSh!MKVq?x31%WbK*X5aXE#vHSMUNW;$7$Ov?8-~T&O z3E)}DyI3f<^hc*Y?MNK|M0B+OP0h-I_iaJ=egWEwEAr;YJwZ9@{-G^7kKFSv$2R;< z4ghz_46^aIcY9_Mfp8|Ey&3cME4$2nxA}C4<7CBmqNJ>>52t=n($*~jhZC?)po|%R zY>)nDz=~{yn}|yXbx|)HxgiN_JFi{Q$i;k6`_Jm@c}h8#tFz^${p(wC58sN=vs)KQ zjFahiHKo7SD5`oR_GtkeM>lSjAZhHY*TEhuD)7~l(2Rw#!4&hIJ1zP8g!%Q8B;D7a z5sZ$PsJK8hs)d7YRAurFGDarN6ZaiLp@BK(mh>U%j@5 zS>=v%ed-@!^9Y5H6g2f6R)VzBG4zt&eE@NMx0reF^2ZcM23_?i@Epa5`$|3jJ#OyQ zfk2}caLjq!*WGmQm&|~*8m1V7DK3ic`(FTe{=YLWU=5J+fs~MArePup$c~^IDk<4X zkVS1-1kz3XW=fQLRP>qgYV7c*XgZaBr;5VI*38&G$P(!R{x|h6Y6c}~K7UY)w7Py> z^3HydM7L+~*kLa+I5aF+4|O8@YE$LWt#!Qe{Q(&U3#zc9WkSFY~OIxnWD) z9EW(O=SsNHFV^l>`Pc_FOwz*Oa}`F*2=B@o?Gjnbl({)dY)tYW|LML#9^Jn^3>D&p zjd(tk=TL4sf{20J30WEsbyrN{KRr5GLQXaWZTniXg7@L{(?DJMHLMorSOyqaF z#1L-^kGuK14MrFjKL-C$8*?*#=d!%K*g>v&vQ474j3aM70c<+9QLyXRAwsV({m|7lnr_AdqkEX!?f6@t|L|q~ zKb|#N$w)FYjz98pBQWr&+16di;`PQ$$+JHP(P-Zxdz|e%lY9%9)jl2kZ#290h7P7n zFvrPc8Ggu;EVy_~4x(jQkm%Pbd+qbm3IP#@+E$z28JS|&<%;w z8j!G%CY=!WNqWiEq?F_$Wto*^3f;(IlkqUlsDE&5(RJZU{{vwqMDZZZ$mxg1g~WHZw}vib&s|mfAR8Y^WuSF zhsn7lyrgKYqg+{PYFct-pmZ$bUn$aS3q8dI2}dE4bf`M7Iw{@EEqaKq!VnU2wL~`q z)ybR9MVIR{jhaMyYz03(m%|bkZ>|gwHC6c|bMlnh`Y=5hH&%M zf`PH-)!9^vX79Y&Y1zV>FRP<~6>y=iQH!1D@_c-yOK5;^^pDkyD@Jo+4mkRsxjfK+ zgNTlk18>-@=fqR@Zv z#A4%nR6euNSX!l6;C`7HrvD$>5RiZWe-}U@TJZF|T%84ET_=D*spX4zOs-wke=B=h zphemx9dCO&?g>t7UMAlG0QAmgoUw&$h}GGd3~~OBSWaeg%yw{|X`wE4$|u~XHic~r zy%b94^frCt8LYhoXR5dAtw)D^SMKuG&7uJM_wRWRlW0J`))Nz9BVru$v$3ACK$qky zKaSa(fRQ~szl?KATr2|H38fE-XlngP_FU?-2`~<-04|uP3a5-zGt%vEbko2Ghe39R>udNWHTqRZ1? 
zP}XjKYB8VM+DGI_>0prGj7RZcWso061tFxO8@ z(&?O#6!%J)es`2raLny;ntl^Ils-k^vfx38sS6u@$$4wf%DTnwo<0|&8D;<~V1Z=*T(u7{cQ112$8)}in-@9+nc|8#J< z=0=+Gb~*){oTB}j`u1KAG&jw771837M-KH3_%Hnu7~GZ4;RS_ivmNbM`rdwmfb#2c zh`cBH2P;wl+!M30xG^T#laoYkVNESWAM3d#^d6$3) zIA|b0(MamYFR7Iy2dW%{>3$%Im|>{1_N4U2K6AZV-gNqr@t?AMXI-wTC@wn8%a)8W zCb_wk_*{|@S6yC)r6+yKw_*;fd*gN8#nC`zlpzOHMrF_eK+?c-k8*2kS`7fj>3=mS z!Bgx2c-e3p@ZFLF4EnCIF9A%u=;W;T^O1_KjpL%16Tk0sk-V`;X&tXx0YZOgC(IaK zUvQyf@I=6%$JFZdG

sFD!!ou*$R2@9wg^^px+?vQuW{Nl@rIq4@IE&3?N>U93}q z__d>-FGXit{UhS)H(sZjpDcFOISumv74*^#JoMVw!)E8KoH1B{PTC~B%uw~Oa#bS4 zA*?B=$+&u`R({gINYd`(_(X7+#*f|MC+YRgOZg^QL3BuGvVZVq8gxQ50WHuhcU9pv zHDr`m8W`v z`y~Hw|4ozD!O8--m3BPxcV*8}Oi~SUozlg~#1%ly`kvVx<7cf)D?bxT)v$AR#!X5} z`n$KMBtokSOw4onXaJws_+!d-m8Kxaat)A=;eeeS3}BrN0Ma{v^pmL05QMEKdqG|p zv_lhMvn=Vp8&<-9DNd6z;QbLi36coJ?J$D8!*LNk!<8g7IT>Zu|C>+P5q3mdSYA<3 z@9t;sW@8Y=9e+yG8Bk6$>xvnu+q>KAOUF?rsYXp-ejDdS zWTL4kAN8i1JuQZ7Xx^vYnBrXco5Z0_n+&~+0-UG6CCRDGRzj`zW(x=P_X81f-?E}$ z;b?o*h5c*Z6HsRWW|2VK-NS75KZaz#r|IatK*5jZ8jQUYH|+1YYK$zKD{qetV4%&w zF6qL?lDN8HVW%6@ zxDt6Ho(~>-Hzmb5d<0XCH9m8*gMe{}+h2gu*aI(0r|}7H!rYt{KHyOGE*`mV3QJ7b z9b?lxG`Fo7*?XO{eM@4m(94C(Wgf2N>xbb=ZH$r-hRb&oA*m>xVOKUq8ZJ($tA3W_oe21KV*WKQP9x1UEH!Qc`S>%+r=AuxF-^(gboAMl>@2&LL4 z@IsS?H0(;|h$yGf-K&zbx~qJq^_wu8a3O;pW6n+*NyRl4;^lekbt2E)UN0N9oaOmp zk<_(1nonN#%&{#yU-2zlOy`nDl)xa922yBf@gw4@0y4y5&kQS3`U2GXvcGin%Yt}; zl5AO0DETh$@}{%1Wy9#5={CL=?H$W>;MpjrjjUMgzaTKw5x*j3mVe-|GVEIFlg`HW zSySt1EFt$Bw>r1lO%e2aoU6vrADpQ&9pCX&sw&jg$#BAsu24gY^kU$m52FJ6Z~TU{ z-_TdSwsIdjD+*T0;t?LY&<1xs-j}9bxa10P!~~__BaM<}9Z0PUDv2wfQgYgzYJF;# zu|A~smyz~I1zvCdZCb!HhF6IxlZZ#H%wPzF(YWRz1MS?&K3|9eiu0}D#+coxJE#9a z%3CI^@R3qvT2bMlo%aMdfcJS%<~oUyM|R_U`=45azZ1dS+S&>L3xqZE)jI^iaec3H za&jOsw$^=&#J0V5D+!S)Gzi&z-Qh70(}fGOL&})&mrdITQU&vHI#XkZ4`a$Je$Ny7q+lhmkE+!Sl(6 z-9dB_xs_qbn5{|Q$MtQF%Z!iKIql`x7m|)q)-X>$L%D!}gd{@V3gq7{BDoHsIQaq^ zMY^)<;}vM)Y?8e=%O`xK*iju1I?HcO2;QZbTS%dl!pF+r#cf!fY=t zo4Ao7T%>oX!d!B4I-8|9AoTfXGyxxw!t=c~yMhTG^V& zYKM4Q5VqxX!=r3$`&?iJnJn1_XD$P zd}L@3IRbzhOb$%dd1T^bI!kyWM5o zDvEA%1&UxziUf?oToEA&ztz=^%9j7h&YcK5JGP4mMFr{rfXkwQ4)R(8to!6@QkD6K-1HU)k^iRE;)M z6H0Kf3PjEfsQ+1har;}~0j;Zbz@j7Y?qdFgAAQOXNNv>t=y$gx_&?%cz&A=YI9M7J z+_K;8^nJ^nVP@;DyIV0N@CTJA{bBcOp;(=t$@5!80aZU;ZF$RHK=>vP->=~#+Y3SA zn5cxdC2Pt6I%Ny?J4NRbar}JZ$w8hg-ceI|RhrP8+<)?pcE?~NWj>bB2JY~IX0gp$ zUfzs-Bab|9tz-7;c>^`^A*iz?AtE$?(hbvA|1BE6!g;LMazamh+)rRyPKSdEy=wlP z-aSi0XZw?nuXwRvD1UXEb0p|+3(s^0_vG$y?Ddvl8hyk9^@z}F8^lpg4A?zy-pb9J zl6iIGO3#boWj|8_5auM!I9paD6l0no!r; zFel#6GCQnS3AIVWLk)D=7If~>VsvDeiTx+G>bEr50{^hle)+2h%ga_Y0#ADJqHdJ4 z`wNOayYWmaV}@?^d`ufJqXlSbo}4T@U?JusWMhTeP+z|aT)MsOw{eEaL&f8a$Px0t zB*1!-?4gdjvJ4h4V?>Fx%!g`WoGwH;IhsE72ip?qo4Uyqys_QRhSL2_=ZWKEVRqp8 z{+HT-K(Lk`?J6*`i%e((tps{(;ZE`5LKtwsFk!Xu}AaAk!* zzz9$dDr}AE{ySF%Xs&8NUnQY-o}gwN_=y_5;je0!z;s2nw1m}cuf3T!*2;u8cNOcz zCM?v>P2N_cGJgz>u`3637{p0B;&O@2gk9|-X`bqe2~4c1z<)eZB_<&Be+-N{mX}M= zju1>VU>C&i7NVFtyD)VYc5`V`B~yuJPv>*6$6`yv!+G?4x=CobXE2-s)jJo6z(t&c6Y70uf#CcaLK6|56g zVqi~yCvdx=fE}y6nCVRo@hoA)2dx3`FB#q9-fTC4dx&J<>SFoI{Jf zpwdjVFaIrp6eY$-$HhCIa!)tP9+{m$nZr6)ODjsDfS>1jsU&s+BU^Z1x!?h_Uuu0- zF%HKrV#P#T5_{P9|8uN9V6iKL0!@BCfcCq%Xp!BBS`C2uVWI)wHvv)6D8P+n^Z%>nZLth*m^VYU^%o zaJ1v9vGvY5S95I(DN-XEN~G|(o?$n7BQTo=VI}c6*h70JDX-caX`c(XqV|Fq?zjB%eP3__uwivw;u zU=A@cgN7PatdS=&cRRGlvR+cCvv%Ln0uc(>;mg^Pe^y(}D4aYt3N%$JF%pNIAO}+jHI0 zOs9tWM`F|ANmqQ2%%!O%yi*_GM|i6Y^1qG{=5dx-sllG#* z+;V_Lm=mptVsBlunPF#5>60~miZA5WV3B&B!m__7_)*woJU6*_Yg=LIJWT;JcDs9) zM0#ajahOp~Jl2YDKF8O^;ylpFQ5e4{)gCnrbfWhr;A9WasrmWG0wD}WgT)RGeyGPQ zMQfh2XC}TVhhJw3=6~(=1`qrw|A=0><6Zb;s%p?XK@^kqZ>>isQ8V$yD2&%RD`fjG z(<}0Y{SOS4DQZ76m#fPbYU8hz$q`kdTm;BCV_FU_`;c*+G_zcq$t^MmCvNTM!m5^5 z&4QboS(&p$I`B4F4A55ajz?!jL6f1qFpQBM5G+A4zf}rsEVW5}WV0Op-sfM5fNv{* zG)stkn%|F-@UCRPpx5rQK{o{7LFyDeHrx-$4Iq+^Z3WpPgIqNJv-ZN9_IrGcrbYKt3|}yMa#fc`=Jn#M4P0 zDJ#0#&y5svMH54N9uiOp_ah{pL&?%m%fs5>b7DP!n4$#|h` z0>FNe3c5EQ><3)SQUC)}s+p$B%)3t!S6=@0>9+a3160vLNis>lnsGaSIFAoIVs>Tu zFbRmWZ))`5N&>h?N$4$4;W3ZWOY*D;yhJ=;uL~PHWR(IFwyQ;8bO0KT)Y_(>7B9CUf#pbR<5;# 
zByZ`5;5g03xUb9|qdLvsP$yCoazxKT0PnUkWbA#;7?X6!QX2(9Mb}{q2Uxt6A67M30yOs$$MV-T)QV#jteMI`U#9xiF8zEiM7h#LE0Z z-LE#CbMhTtx{jd>d*?smLtWfkJxsy_c^ao&Q4xBr0|v_8j*sdk2FW|QGRj2Lh?NRe|>{lm~@~^2@ zRxXR?XwPlcA1Qz zUQdl>;0VI0i)z~^78xq$$z3Snr^oLfT*`#4Qer(IYe&py#_BPNt));xS;dU|e!f>F zYn76wHvTy zN%n@^RaCD~lF_H`HFmgs``5=}VGTrRvC<`8oUq|@MnN)^>k4T2!w;Ij9V2^9EpZOA z&S)3e&KIy_hNkdi8kta#@THIhvsm7SoBtcj%(Z5|L1j~RRoZ1rj9quUYB5_%8Dx;LZZT_Rjyrq+}OcG3q+j~ zAe^Ft)>xfYsleqNa1mygdY2d|%A!_Az=9P>s4%*sXjW_&Cim)CE$R9>-`m>vU%qiR zZz#*PVBiJv(%+2K37ano$h+j_{??s)zKZT0;5?Qes*K2D1107@sS!N86Lr?8ENRf! zv;R`XrxYifC~P+16>mWaE#ve-oU$r43@Q`WJzfJ+pwAvMU2?wI!Thb zmE+&Ax8%Vm`|or@%4O}>;L9u+*2fU2Jqoob6OyVngyjOtu(WwAFEbmKX$4$xrf zxYbI{ok@zfVJx2tj!2Ild=m3jVyq9KWnmAQF-*ihkj-{|>r7BT`rEdMu3zgIZ?0J# z;8wlGmK0X8@!;v-`92Q7hY8w1#POU%g(bBJ;-m8Gz*|n+Nhn(=dc282!6(l1sZogof z1rg~G&YnE5#P#vm$shI7l}@ln!F|N}7(1*}j!IQwMNH1FtWMk!*2M?+T@mR$Nf_gF zrnwC8N__R;kC^hOxEnEC$svp1YfemOTDUYab)Az*>24us4^;1(IY;CNH|0mXGsk2o z?FBA^ZHesU>;KwrQB>^CttZLdaaZ#f1jXWf4|JR`)v8Yp9U&k35kG(Bhd2-a!N|4F;!Z4jLv(8m08y=$3X-gYUWA;Z0*g6~J491cdBxy?#>i=fC*bz7X^2 z6(`X=Y|9Bnt@}M1or11ii2|)Hmadj=@ zIP!B|O^=tqeyt;5^orlZbomQ?p4cQQZFWTd(Q>OqXh1m?MauDu?8Bd9)HC zBCPmTn8vwCryf-*F|2dL{&FUDq?C@fHYt}uS1Pw*o(o=7K!tGk4<9)8^u2 z3H=4Y=rCL^nIJ}C_ylmcSX`p>s1QZAOz^EA!+945$q))b?@86X*(g>~~`SAw(p#`9e zORzife5VaA>YnDpW!c3+$qiqM5iWW-!$5_Ypn-M>ngj=RCr*NJZ}(1hs1A^HmLARh z#e)Ts9#9I-?Lf6cCKR^zG35=GVtM2nnu`xG1_*WO%iSyuqS_(yV?;<MX8<#;V~C(6!F&)~oaxqsV4iIJ_` zX510$!{mXNcupT|2@fE&y}i7oJXoIT^=4d2d_eip?esR?t%owXz>U2=fqtT+-vT?i zl?Z94(8QQx5Z#nJedItHmVT)VYZgpfwbcVdKg`i_!Bf25T8cI&ZBH*G20W7T6=7DR zYiOR4W*8PBGeQd76(fvGqz?ndW1=1CO7joh_ymvy`?KRcr@PGu36OF zv)2_!fI7$)yWk6VyaEZyPry)MfI2-rJ@!O=mO}F6iaFI=6bXPT&D6eZi#Mbzcp;HanuNH_N zP2}f3YGKYWwZ&UeXW{li9>80NTsn6Me#~GEp3Ug~X9sg1W3Q7hKm!$5oXDwPB2@}|AfC0`Q;r?z(X8V z3}*t7KBP7=gZ&?N0fXH*gjw)2(ec0-uQwOMc*)p1ugQ(OC{y5`&77 zs71D97ak)Sta{#ru1u$aI>KS8ajB;2Mp8W=_S#H(W!Lr}cpen9wvZALb^8 z^Qwr!Szj~?L8*nHbhE2Zl%5G^$@Z;$mw=HQ^Pj{?<+1hXh(759an4vyQ!VmPR)%zap1G3yvZIR=<}qE&^8+-a0^{LtM-DtU zt4FpRH;w7qEW?|fHk+Be9Nu}@Ejb;Dd&_sNt#f?=d`81BBqCmtdSllKRuVsoD9@gi0bVC~LbkO8|CaNWc~iH;YH7x^esA~& z_BNipR4zVhnH*6Box4OY29sT}R|luuCQy#l@g9qb1v)?3Y4(XM~X;j$N$-Tn16>m%&6j3-S$kL@70d?RU2R;RbOPc0UnfZcg4giDWQ zh|*kk@+EhD)k-BxC$1m0s>FL9KkY9X4;W#>AVF+DIsMKiB>B(ifi-hBlnT+T1(YTa zzCmg>koqDXy=k~{Jhq&vM524`U-?MhbgDZBW@-|!GNZ*(WeHSR{}HRfTQas&nBg@R z){(XzHKIz1YDMVfJ_aPpu}!6(QCWxCnlG~(cLZ?Q`RyorsbvxD1W{;Kc_G7r*)K|h zuqCD0p&>b!E<8S*E#HZ(%uKa<(FR7Kr13>A-&8yfsrhfzEEk58JJGSsMI*&v zPgU3i*-;{-NM`Q<@k7Z&C)r98_0XedHuOQBnek?B)xH62q8#Em!0wc*$)t?U33tKc zsUMSi@HFT1hd*W7*F(;`^a+o5QQq3Pto)gs z4xvGvadllfryi+J;%!RKOiFDke?dVG6io@QkF&dRhPMzGHrAOkpM}C(zc3N&#kF^} zvWNFgaZF47fPfSW@{Zo66kEFv2Fq1j&fudh#T)n?0Y)NZyE|vQhB$^?i|Q z=cE~qov#^=*(JQ3*~kobBPoUy@t&^maf#`WteS-ud=n{(&0I027piw?n(AU({4gvazg!JO3+u z>`@=HG!G@X3j>E4?iv}$0v|c41;T8#;<*Z}Mr?9hPwQPTnGgo-ynu6l%QG5_G9IyK)sd-jtR(Cj$^O?hyaGB)Zlm)Wu%`I-!Ip(Xq0_Q9vz zMTv2v)kX<}PoS~-HEF$9^QNCQc>E|3egfUMRPSp|u~X451Ee#t2Q|@WMf~qJ#RrxJ znd{>8J};VSvm!0ySX;_lEFCl0{xT+i)?-E#6_F#ZVj*3@PB=@gF^L~f3T0Gb&lhb= zR7@)~!36wTGkq_aOL@L?ieO&sQb{q!LeRw!`Pd%PC_@&w8DXh&5`-U^gT+egsKO8g z|AmL-hAVG<{N1j)Fk>FgOR6f&Iw=tBB%shWiq%u*s^r}Tj}I`(5yBXm z7MAgl)wTd{6f!6Tuz3eOriuSRp`Oe~Ky=vxQcClB=cH6b72*p%1)@tL^wI7SQ7nW} zar7nAT_q_2GE3^WkX6nx9x|N-;mW}RuK11W8M&W+j9!xk>A z=m|}Ec!o-o?Q<-^)}oLdu5BTo_WJ5V$nJf_;2j-XeCMUphbbz$k8(N{=i5tPy2`OW z`V~0|=XH0gZ|JsH<Hk-aD^Re zXGarks}iYBh*Zq~wHA}kd|cEb9qGdYzcnO47QLhd+kU|*LXhC&$UkGN;4dLTh;D4q zal^G)dy|ho#vOq)nb(6=G@vY}accTA{0pg*-l>P`Y_wYfos>hWrHY+A``WAy6o?cZ zHJELOM7XU-1w~Mwi(_1h3Jkp(=g-oSq6geDI2uS`$3&B!Yc25sF;zEc5@**Hu=y%P 
z510F|9cB^)9phQf44((~6f;)ZEsr?B_38`RQzLjW#5Vj`8+({L)!SG|GAJ0M$a+sK zm3N#ok^Hrd{cSb&=SOec zEG&|!0^F1gp5cVii9b(xf%%355FQ_7Tn0#e9(7GC3?^Dkc zng$#1tn}tTtKo!^OzCNh+)nVFD-?bFI=Ryv4}@<1dmt*y^v;s-Zq&zh36MUkXjvNl zyUJ1S6ljLVwYt-v3$A|w#@|%JaDTfv};l6 z3ka4uHLkZ2whPj~d6GFb5u}Lwh=;=7=9Wu}8gYF3MUZ03mfZVg(bm@wtpulLD%M?` zSeV9kW?)ba9x5z#&!ie{h|3QOX(EeaUNzGya!DXEUEp?_{CkHZBjLF%fI`EJ}eRF&0MNb|Y!*&jYX zq{?N`3tWr_ZN_h=4+Sf^2Hta770jr!>|71{9O~_n&AfvsDBP4RshhN*^q~Gj;m4-N z6u)ZKilmy=uu*=qYOpSz3*=rCk?Cuau%terY`d|vS2POxoRg>lolu>~(4hgA2A%DolRWy5+s-#nA^`g)oL4n+AFvqpOfCddh_5*Dxy zH%Wp(J+^{@_!!;OlHtdUiD%XXqpe5~Bfz;!(X#arNF+hF%$jepQ9`Mj!>Wb>ZL7`- z8s^MHDe;jd%rR;T#^5ds98}>Gk9U#qMm%D#mJ{Xrb`{51Rkh^Tncq-URuKqG?078_ z8=LI9EN${Wxm2;o+8Ic!-PT&`(_phg=AYJ)*@kgVART69)P*&b6zed`&XGQe^pQ`} zjAx9>B9!Z*g+mDWl19SI<}>RLNX;Z)Q8wpogfh$DC-x}i)HkSSN{O~Qh_^H%@Qqdi zw{#AyZ#ghV{22C8fM^&Jfy;IL_2$)qjTM^# zoh&s`3L(@6x!!<>B;`GurRZB=p5!u%1CUfhSW3L(Myi;bjcN2@Ma3Uc>eSO{)du{H zXc<8!y4s8} z#x6v&djA67LDq-2Carb&v~4teJwzCO^&$3XX%&XG)5p+%Q@%a;5a^)?t! zc`}FOC|m0tO6?!0+LN-YJucu|A_a(8dEYjh)%b?oz$z}fNIIgXbWuj&>1qKb`>&r z2i51gp7@MOkeblGukj(38DU#$Tp4A07C!oIiOs&H`PL>5Vk`ar>2m_*!0k-4pTOKhSBkW!mf!h#O~eXO+AOu&u*zH(TAZNL}1f6Ry!$h}Dkshrakw7~M5Q^4~Xr3#cw!uaw% zLFJ?fXrR_-*?2dg4xouE25o8a9}yy%S=zZs$n&`GnBi??nCKI+*pZakI=8DITJ21e z3??x2PP7WllO_@%cL2Cqph@J78C&RmBF&sZFR>h@%&g)@>h0j5TN3xFX6o9HMCWMe z=GP!mCh4;i-8h97i{9iHB0_-W+fdRq1O$iDmlN8FSf-B_ScR;BH9h9R= zz&_2wLuCpH6x;S*>Q!H?2axi1vBP@kNWE(<9IrD)9~d=jS4F%|f1OM_TW7<1OF;_2 z^VsOJZJb{r&5Tq>M6eGU+~V)m;Gc+;7M>eicD(X?9ocWh++cXGb*X63O@Z(|)SqylyhfW^}X(&0R zFpwO|r{iTF>rx@uXkCM}RnOpgE=mRvDR z@3*6a*~Fp%hWhLFV@{+h$S}mjD^c;=h>)Uz;Zr&ysNcVpcx*fAfq~ZCYw!1YXJ%%K z8;o!H@xS{gw+#*M0RQL@vst-sp*qs@Qcr?{Ou%spjOh+mWyhwe3nn^@RFl?4J0={V zSt zCGuU8vpgqOh97iWRgUH(@2k$(gQbAC+278VCZ++a%Tf(ozK5&@YMb+&+9l`v4W8v# zC4ODpE3RD{XGSeH)BlW?8~CD_XUvAkaDHrQI&VkG8TH%$BaHg&K%K^klF9`3Wdp8b zSsIyD4!_B;Y^@8nx^_!yo>WCF)jIj4w3;`;PCq|zDkGMud4`!-F=SPi?APFEprqWaimiMpb>G5^_;ib*YSJ=83 zBg{Ni)WLz-r6Om|6LjQW-9JJ{RjSTBA25somIrd$Qt^i;I!5^ZBbBEy@x&Z#+$grx zF_uj-&aA=onjxbHVzW4Z*j^2@rJkeTJ7KWA@@QvCJ#W^QO8P9pgNe=zj`+p&;bt-U zXDW9X;$0FRlHvRz3ql8%0IA3ff5?h{nKo}qf)>|S36}h>b&e8baee*n#)m}xt=8RD zU$t+*%}JHUCGVXKje;kA{jq@XSQwT^|)HDuuOzNy`xU)<;Yg;P&BcyantAmJMzd+UL;{@A}?OUjS zvDO}Z>hx9IAKQ2nA=~-#CmQ6MnYL1nFd!j+-xBN>rE+xjn}2HXY20s+IQU943J)>9 zIT%1So;P5Q{Ct89*kpSFlPg1j8Nm03F;57hU}x7l#+w0aK@TfTtCSoZN`r(Zl`>q! 
zZh^XaJfNQk0S6ujuM8jeaQs zg|q*Mx3><9a{bzdWf;0UB?gd?Zjn+c1w=pzK|n%ETBI2U1nCYzkQS7*=omsmS`-XQ z8j)@oX1?ooKhOLAbU)wmeeXZ-ad^n6?9F{&*NStkb*{B`qcN2q>qYg}4ek8boaeP~ z4~R*!%AMSDymL^fcPr%=t#^{5Oinp1jpb^}@c6ry_pftYo#DZ~R}NGJPWgD|obT$2 z3DQ(3pF=vc+^QR`Q}8-IUfxDEkf9-709SP2+%or9qK0*QPIIC1-$^Umt9uZXZ(_qu{MDEP5jSSwG`!q6m3r+SnQve?Gt5oto?ys{$y z_KBCXkggiWm8J*x@Ez6k1WWt69p9I)Cc-p7?W{u7!f&E*)~kt00;EOe0kSvmR;cxD zoF$%nnY!|Z4WF5jWR_@Es9N1NFz#0=tTlRjTrILy@b#%dl)_P1+-HFSrX&l590#4p zsaVEwCw-=Bj+)~{^;>2$wrg?HoYWpy7}=5#+%>8{RS=%RYJ}vE-1P|w9Mm+@EXTwI zlP`qaGI*vD75idE+P?tcw7o0~yM*>YF$BUZo0~8qUQ$;l$jZtB6NBs!$pMN{8@b^^bO%>pnPBJUP)iQP^3HEcobVT*0|!< zQ}(K(*vbFMwerV8T#H=Ngg4Kd>nH09ErtuYN1IivGJe)Ui0MxMJBXtgzsm1FgLS9A zHd}z>eACrWzXpk0im8vPdv>aHcbg4jTRsCdY04 zdNWi2_T;^P|32Oh9X*f>qK_jfTS_n9#KWX47>@nb*Lz-R>^En|Bct9Sez-Xa$c|8* z@Ey-60h8M7^`&R(cD_Q=tPE{%xZ0=#>S9-oLT}{Gw-c> zDW9B}Io;v0abvr@pdOq#MR9KhkAhYZxiEWKI9==bGoP{{GC?heoq@!%P8*wF`jRmRpA)kiZTlJAfdXtd%NaSHq zyCIKkW%2{v5UaWXZgWgyW32t_)U}?G&IJ?2h_iFz= zr(fURk%3+0G9!m%q@*)@{v!H8AV=#!sLgO$bXxa;zfF*KZ|t(tT7y!(g{8OT>0YPV zPHPlg?7VJ6?_RHSu&(vTRsYG`Sg&4WLxpMOD!A-yyP3Pqw}VEZ-#6Mw_jGat~_>BKBxcMJUKx)!1u)Q>XB?W zF~C!=2<^SEay^DV=6Bt@b=3-w*dZ6BxR}mJ`&{}PiCq9l3`r@y4ZE2HA+kkL4qmHZ z=?rntSTv4{iT|{i7Nu;L&ovbHv0X7~nTcBn5n!UWpQq+z{MBp{pJA)dwps6yCj(0; z;=@67WYx-!dK1-Qvp~ATzR)XYTfey5K$>MpnTRN0Fia6#vSD?}?GeLfe|4*SPa^ zJ7h@Bi#??QUaT0@F{kllyl3agz<9uLl&Nn97E};2#*O$+sm*cQts-|!-E^6^SLah; z1+v%n4jCWkf{Zk+)Jm7#^fhfFwdMUVNs~*fv}p>fHw8`%uNQCM427lrvdh%?IUae~ zaP=Aebw8)ucBO$b2)kF+g+O%m}xrOQLR?GV~-UA4MPy!?)x02_XHpxHJfWa`1l%Ew}DurZcd`DmVw zPAR{%lo+&u|DK-bGabYPwqFy&VV(nNr)Auvn6FCr${P+klN4_?JWlx3U4fiX7})Yy z&9$BEaD2eoyRFg?3Nhr?M=3_6iclX?#Xt*HbfI>yDA?|7R z^9^mL;{KC-$;`ELaysu$ZN~z8B>+_SDsMBrL)*g*eJp)tdW+WN3hw%Y4EA~Wiq-LBVQHT{9OO)^smdZ!`}A-OcTpXo{yRmNiB!q>6W}_PC0#Kml)z| zZ=$pA?0<(Yw*!t`C?ikayS7*s`!jn&Z7UNo!Fs zWQ|$K2|tv`_yswov+EP*NfK2T!9flw+N`7Co>3u3Yns7wB!pqil;JdNaeZ!*!P1|_ zG<9#eh~`_}0hPtS@|ufRRh2F_`*?%-t#t79o+KW5k6Ab?D{D0p7YuFm1#<;8JUq4( z4?02b0DyYE70x3Ke4TeSW@;UFidsz&k}p0KCHorW;RdfKzc#ck>~xw_(S0tX&5NJX z{q|GJcV1(ax=WYu>GAprteu`ZexT31-e7#JUN$B)Pl=cBC;#>%||&W7Y| z$izH)Bo^9KnVY@vUZ+8a|4t=IXQo`rbEzHMqg}7UG9A&BsANZ(ONcw3xrifYf754Q zaxreSdV2XUY(FmaI(qJfnelxn!e?90xH{#4y{(e-+&;;q9p|DA(zh;8;|cgt;k78*sB`=0TysLkW?#?%rk@J(S@;@a?wAkwV-%wn+RMB zdHXO%}-{ovS6@!xZ_2dg1WDZ*odgL^%A5* z{n$QM;ne2av27-YK%Z@>Eo+MHZPtPN#|Y$MGAYc-S$kF(z zS0r`51Sgl-wi`(6ShD|O)!=zLL+^v$1}C9You8@Z_Si(6j_J8R>9hcZ+oY^%$^3qx z_Zjf$ln!b}D7d5^c(+9sFRC$wI2fP|5~NoiQ~pRmVt59#FNA^pMt2uJq(Q^Cq5Ud! 
zMFxdpS=5IpPH2tzqFXSS` z9N?&*{r!oVFoC~t6gC%6LM!&^@xgShyR_#9EvA#aey@Ycs4+lhwAefblY^#ELWvL) z6Zf7RJBp?@4n}Loe!QKF4V~7916(BeYIyjEYTT(npn~Mcx-B+pd5Gk55+x>Qk+|qe zTk{uwvNS)Ce)~F$oM`;bm7m;`gZh^`8UxQtANM3N%_C_r&_RG!{cet<6K&x+Xe1sQ z9=1W_ugyWcFuZohKsip3JPJo6dGw+Eww{1#6)D~FvbUT}tlGa@Hu&#%K}f87xTr2h zHApe08QcWLGM_P*o=s~Ta^_FPOGG3al==0Oy5>iL0C&2s zex4A{=*;v#SpwF+P?`M^yrpMjv4KsNU#C%IiS**u|dD9JXiWsFvVslFd(?? z{CLi#1>37G(7@1=9|1k;t1Tl&LE4HGYy0`E@zBN(`tLcOW|q~6Zw&^v;v(O$03ytL zq8?&TyjJ@hAPRmnSy=i@HMlzdkkvNq!|R$8xpMpAT>J0OTrqedMq8PwBQ`dcgOZS) zmiP)Yxo~5|5;ZVt<7*k-zYu;6fXM&wpZBUOf2L3~_;)fw32@dEb=zOPy2UIj$jQmG z>Z%z!Wp79aa1q;DlM(Z8Sd~qF^_ux=-7Eo1FNV#@g2XE^3Vh4fszuTMwBIfOD$06O zo<770$&q@VSLW7EJzf5otR6V+Rp;GBFpN6zkNcgdUao1|zZS2~iRhQxg@ewdc3^># zNlEm8sE-|Bsv-K*z85Qh)KYnXT>7ZYN5hvF@}%-E*Jeb z^IH0fq0#l84C~(>H`=iEej;SU+mdk+H=MFI7mkuFU} zB9_wr{W5a66?g6ASvxg^8^Uo2$l+5iO}9(L&CtyhLBUaMLF4-K>4iGWwMSY3o(lr8 zA9ve_NNzs24{j=Drccp71y8ftn;nQl0W_|VXSdk*YpDSuR_Vk>jdoGWcUwF1YFM@E z30FDKyy)oYV4heoBl{b4`LDq{-XBOXtFw~k2M%vfvOL8p+u&WiTB*Bj31Z0!e8v#l zL%(a}>zD$F>W6x{X~i%o$iuA;zJJp%1bm$Y%tb|B^hMVi`wQxs&v!j-M!pgkIhjC` z@8h6~CR5D)C6be{-FaYG7t<5ZzSmx%B#)vZ#6|30pm&jdE;7J4z5z?1v30$Ljj~K*yYwRo>PHGZ0S}WkZLX}YNSQ*xas~F zfJKLKUm*Q$H8vLbM)|XSs30~h}eN*mvvGVM`RTUl(ikx({gvW5deftKQt)eY2 zPREevyHJWK6pJ7qU(5c0QXCr^5e3vZRbSte)S{zyjbyilpO*t>CSM6psZxDCo#!nA ziiK81o{}!mvs^l^^NJ(@%U$H7FT_ROCmL4;lU!wj&!k2cKOTA-ee+mPJ^yk!UG8_F z`v-OXr;7M5g;CRAz{hy=ckVsm%&@HLpdo5oBuaHUr|;DM*b?lG7n+k|T~=pRJ@NRI zG^U@>_jLkbT)2_KtCh;L1jw8x*^J?;1e9w{p*cHPqsbjQmJfuQ4YEQPU+v~1Xd1-83(fYr_FD)V6jzfH z-{Ztcd*#?o`k9v~?LE~+=uM}~C-XurPmJa=$D;q9-x2D1|g5uve#eX@Kd)mM% z)_vX9jOj|>O7_7TDki;EWSL{-6kYtUUJih7l_!=Mo<8vp;PkQp) ztEWe+r@B@J;4x}Hc+t9C2*Y?tOk#bvexF?NbnU9RgoHJqvp&DNdBDh_ARl?wnwcG$ z`OB9siHuw{Ne9whco*T|T$g&iL*3=ne{bAV>&f?MEo1O2uHAQ2CZ5Jvxf^SxI+u<5 zVXDfVuM7hUM_8=0b*BE=fRfP~55McR6H#gt=PEB{ASk zZ3cW65|&J40!%(6EwxZN#F!SKMdJ`C{T-QnW-4{=H_tjhd?NZiY@NGpZhHEqd7alh zD1mA|Zn_BEnkcu`x|x`roqdU!xy?4O7@k%#HOS<}{xI2rpA2E@B*T%`Q0R6uSZO4B zTW+^GS8%G=vy9cW(k8omYo^ic*9p+u7`BJ$kWsE1y}kh>)1g7M92+HK-d?>?_~WW( z#ft*byhZ1>x{;82#L`tCWL@2gi;b(+|7|hAc)T&b$-K<%(*)(9u&D$LnzPS0IVrr* zn?bf3nZx*4R|54|V%@llAz|x@A-Io;!=<0kZYsyAbwBzDxcSWeXk2Qd{7LB_5kKL0 z;z@F)V9+<;U1D7w9jn+8(l^T;js2AwFvq`8UL;puf+_Zp9qnY(=|-Us5cZdaYIS_yHnk*FoE`oJ}2S=C@H;`pyJR^@WG{$8oG;&{b) zNE-&-H(RhH^E%C7C3h$JzHT>H;-$)9^z0`bRG9QNAfo{!z;}z&1r%ni@kHp$6Klv5 zTCRycHB*s44BMR#@vmMzY-~>a8_n#WRs>FIf*lBGl8!md=BnpkADlIYsF-c9x9;^^ zwQHmet}B9|3sk%6zOLjmo1aCPegT>dN0=s04Ag;6ZMI1@Bz4R&` z?cm}oB1BZ^+Rk>nVW&3SLrn=EMMhZ1Z}rCJLBE0PTThLYX@y4SWygK$qc{L%r)u<)B2WEN)?6q5SynCS2u6jT^d~zN%9Rf zGJe_&I@H>a`Zk>p7PSAPsm5pl`@3Cz=a+5BL67S84QdoI{!Qe@!Q%iw6bEx+A>8zv zR^!Xc6yZmg7t&_}7fgboYx2Zm5cHQR_`VO&utrXuNcRSWWb&%I@b1}M>HB);9Uo9& zi)UE_JRJpKmV?3Zb@Hn?sAzzU5WMmULT_)EWPsWv2g8wU*k;1h2GV((`csK?bdC`iO}Cv%>1_#hlPSVj3#&rdlQ3llj}>3LxV4HKcqgBqJv_T z(aNTzzXpSlK*9Y2`I%(2WxtI}@bP`lgUEKw`qUmU= zRuZAzq3Zn*rDz@LaMt{{Qvn;|{hk_hGg+#)6zI573FUOi$71bQA#JK>I(!oR4lFo! 
zpZS_!oSMjR*K{)KJ?^erdI)Ni_c?%rsuywRHs&3ioO}T!(svtYB@IU(%zDP>kI(k@ z(=V7o(2>8Zp#jQo#q%6;N8VM+1#Zwbq83P)H(xIJG%*nw5-~SEG%AwxO&&AAw+DUQ zAF!arI|(lHL=`sn=g;&7zi3Dm#qa9^mItH7J7CmO(taIEIpm5pqYzf}b0X$>Ic?H{ ztdV1XeNF5gu0{1&kLJtUoi|e)%4`G4Ha0d&A9cUJcH^1Ie?YFvArYW10-CubGY68l zqCS2BkcGuAY;yPm!1C}j>-ARfJL+Li=L{K&|`A6`TZQCWn^5?6LiAMzn=`Iw(f z70lTC9$sRmAipB{Gvui^p~GF8&tC*oA3Ww_)>=(dT|EQZ4l724iDF7Cl;;XrS$V>& z7;wp@eh(irH|y=&w;A8QFd>~pUbp_`E9$;vi^vsV?Vbim{_6SOjqZ{KS z-vOrIFT+jkyoJ5Or_tY#g_BIhQXl%RFE;$(z3M2#5nHovdfnI&cV$mAXl{H1JluTo^Lv-<#1SpG;v3RYZ}%lZ4GlUhwY|SbrSV$3f}6Aiq>Y zcwnjgIGXLjGFPcwYUzZSTSlF%beV@&&*sLPnmZRlgVnD=9H#Yp=G~v9K5yJWWV{jqMquXO4sLT%nO$!me%5%i$7|))hg)YeHQA&bEEY+ z9)7#w)197yNuC^AkSnAcY?$L1VOln9IWValTDcb9x8A2oeV&$F@XF1;0#51$lbe3A z{Mfn3spz)-oh$OJCHiG=ZEiXjD~!DWrSKu(JBYy+Kg(d~IAtj^Go$TIR26NI8@+$Ew>NY@3Ahrx_I%UqJ{!8+0>>^ ztpy7r8a`PRv^YHWSsmBE%$3Hy+ZNW7JQZz(8|_1SortNO)8p;iE0w>GPXrQV?E7V5 zE&lsHIg!FzS?AN9c0?Grxxn)%H7b_%+tyhOBcm4L`t;VfTSbv10nZF$ds$?DdB(y0 zF!gny@JH!AspWuSNa=Kg8qPj&o{X5$_^EQ<H1>x)ee7AtoVk|Aj|7@xKhrOLa*e6}0 zcek$UrrI@xyYySMeXgTdu>_6z>9u%%ry53)_nx@q#S2$2$2grk%gGqn$O$*DepFD_ zx<-Jvr1PjnpS#yZbZ5m_s5rmqGu_aZo5EZ3JpuLs;e24PP(=49iAUpl_R3pc(*3Tq4$tsssHAm-^$(tr0}%6-1Dl0iW-DdNwsA;#gv7N zh8fNiKYSaeG?KdA-2O(6{a2HMWnqGau2U?^{M}}ll~b~;TjM!#@hxIkE)zmSYrIsn z=zUI6XvB(LpTw75kY{>l#BOU^VHAJw>4RC`q7W@<0#jYXNA0I{4?YXDh%8!a7283d^%=kq;=En;VgK+ZfAh@-(+FS4#4pVP&?jD zVQ34^C2~XIMkL2aDB2fD6(8`Qk<1T`j6`5s#)lUf5UbfvgmV1BEha{emw|@G9iJR1O@`v zZVeYCA{c041)avfm0V|#5x;P9MH*NTW1c~tc6tYYNqs0t!d_NPr?`FlHWG~Qj|K6r zS^oI!oa7<}!i z%47Ia1S{^$2%&Gm{4b1qk59?r1NXvy7NE)X@VVO6=M>%? zLxJ>UIbK_{I&m8>sPjkHBKn?y3SF(sBg{nl zL9PXuHmJuV-|eAe*_sckSqvL*_9n2t%ps0-HFTO=ci_v+(Un0Yf101&`xz<83$z13_6$_jdp4Zg6E`A|*2 zhVF@RC2tEi@;~L-`@HO9b|zaK;-N;;!F;tbCa-ux{iRCUipltO^z=f(=pA1()_xGf z|JAMikBbMlXe9}x=5(^#S~jD2;&AF$wG5Ol+G?!`1c0B z$?~X!I+5dLpyRr^`LQOn#N_1U;OuN{-bX)TZTUT1NtfA|WK5C*FDheHvw`-2 zTe<6J4J}ytm7y5R|g&P=Cey5{cyedG?j~&QDNMY6-d6z`eV8U&# z;n?bIR4_N;og}Tz-{yq=H4w&|1DmuRJNllIBhg%1rrg`kpH{pp2^=0!fem}Mk%HMtk>9w3i+Y#=;u(pOE$opZR_e{$Qa&aF#j^Gve zxp77g1lP##nc%O1;=lAd2;L8DQ0hE+u?j`=pttAHWFg8}Wk#f!YNfq{VQm|*LIMoN zw4^ng98_jvuBfnR8@j*h_UpTdmIdqW>1$-!=C@0YoPO1P@m{>1gJftIb-(iamtg&% zlzX8sfUWkKso#~NvE2HyS4rV*5KX|QK~;|bX#4`8sZ2>j%1lw9DzdeLB=W{3GF&(0 z&uZk#7F}CR@>Sz$tYrNc9^XhfZL+S=;A@i7UjUVXH=Ziq#xD0)kbuZ@b#11*MXg5W z>&mUKIrI$$RYC@M$TEw#UpkQIymvbR#-an41jjswALX)~2!p|}{QUj*dMwe@7pQ>? 
zfmaK}y1Kd{>8K?hpQ8y|UC{0N5o!$2_73MHZSN`X)J7s@yM8KwT(~wr9vv`ktJk@E0ck1IiBB z!W@EswYh~rEq>dA-Zedb<0^}q(9t6;4>g4~ZKTz;&%q<6+=)mQYURq+2m(XUADRKv z$Gev^V?@X}TdhW*i%k@uo|3mb(r%>3mw0?0B%RuyuNB*hk&J7&L8Ox)R_t~U0dogZ z75wQ5$eCf7T+ZaANs&<*_JI68>BoRycaWblQ;%ZuN$xD#HQa|KB)RO3>={&$aSkay z3TZbdEbJ+%9*=L=l=@_GuczTl z;n-?gBD;Y-@p=g8zM>m?z>i(#$H z4aI~?{HiOHim*fQmvgVMclPLuM&FWuVdP4fIUB?KmDJiMk%hnDh0cQqg4QkA?q(x4 zg$9cla-?G=3+`WAEv9wlD;Q{~hB#SGU&JKz($NPzMaJd$DUwbtG}`hZoorBMuezCo zm~Qrc?zEg&cMJMLYXTPGyt6L8b2`8IvPGHeM{`z}70?=Tg4W zrS>YUyW1+$b&ce8Il^z`>fRkO}*iBf7bINNq3XXd}x9EKL>`aT)hPdnug`I7BJi)t zMaX!t3#A)8e5yhO5l>=^I!-3l0crckV+aSqLUXe25!>6;m7fzMkKCT9(T=PVy-%hq zSQC9}1OM~z52{zG?QAwRsqG~>Z6#Eo1N7K+HO}N43T&hP4-WYUuALb0rhV{I{bu_2 ztJ*4fscC7$VM|_#pS6aYb{gdoW2A{Rv8HMq=kCEV_FUCQ?!83F1_oEIsNC8=BDsNy zOwNfCbLHOZ>TyJACK=q?d4K!vpwK-QYTxWFF4ONhFr|DQC-{S+ubwB5!{xbq}JmYED& z=7ixq?fb4uQ>P~<+vszdZt`3l&?gM5Cn@VENb}mXIwe2Tmze7uS~7rmRW?$ZEdMll zVWI_ScOqY*xyQnf>uP=UK{NKLUEG27)u+ z579e+Hfk34$-|(!QA+N;_3BZNrhble$uj_wsPLZ*s+^&f;b(YoUKG2`u;WbDilWRM zb9$x;v!~#r$B!>-X@xwPt(SvX^k4S{$mt-l0a_bU;Z!yl^2kdHFb(!MJx{A^j# z{x~q1#FyhRlBt&vHH1HfAbLSEt~8guGT){pg}} zs2D2LIW##VQ&!)Kv)nkSMC{VUM#KlM7CFHGsSudq@l}>vWK>#GGK+M5JsA6li(sM& zvHlc*?e*hD%qRYzhl`Qgs!9|oiFbF!ehF3?qWrO z|DCYv`4^;B{RFQcsw(?S)&}Ff=WmQ58)3oW$nM3i3)G|%QGNSufn_U3YePlhBMTj! z+*w|X&r)J{b0pH!*W%+!X}Ywv+0ITmH5e&;3lB?4FYE1o@)`0x^FIkI#ozK+V!LJ} zH@{Jo@9%B9L8Pr*up7#yJ35^dBXq)xXyuN0^p}V);;M4TaHEl`a#G8ik_<+69wL^} z)6(zNvC}$^Jgyb2JWKff57x63%8EcQpkvx3}1^=q^jAK22A31`*0y4t_6 zDZO^|=+W#Sd-=8^SO+NDM`EzPd1hj_IoYB4G2)>n1-T^=wl56>QEZVtjb|<9$^#TC znW*+Q2{RQ*I``&i$26*G!ACn)-XKE=Z$-&I$9Vg~ib2SJH~Wjzm`IYz!K;Z5ZI4}_+W0kDnx8Eki1C9zjt$!r6?yc2tU_4{Lg`F%S1k?|SR{t~Z;M`>%D*pua5DYZ_V$4S^aMT|mz z_cz~qBuuMde6TyUY_pr^6z1N@%l|C+`7p>IZ$Fktd0Xk$*vPIEv8wTlVJ~JfdBqy~ zOLz_UW}e^%RWmS37l=K2>vi?gb&;1^A#W0jX(Y0Gl$8#-rI$LZLh_qmrKi&V)SEpwyLx6IPk?8ace;h1ib_ zsY1jqs#`JEY$cBOmReCLDk%;xe=<_5TM!kI{9IpxBvJpG7XFPrH%0h(m877$+{Eqf z34+IxiI;XFk#D2+H$5ACf$3EU11{^lzd@w`6(gS}yvKuhep(Ynmtyw~9(cLiom#o_ zHaB6K|DD(2t;w4#4mC$Vx}(02`Q!Dz#^2UJ@tru~i8srBG9AW8sqlOv)(h5%;G;!x zQ9kH-e!m6tsQXQhm0tCkoHxC*QgK7RfLO^JAv;h6RPh);iM{dCvp=D ze`@uC&`~dFRQ&$qGtU@Q{4MS6>w#OSJiZH50Mp6lD1zeqc>*@)gJ{5y!6lBFPOBsB zT@rWu`FseBYRz2F<+0e^X5#G86bhr(!CNu0A;@^lG?s^;b%zExlNbg;`RC}|e zS?NQq!X@9ENvw96v#F1blDU?zTBRK6`e4_or5#(E+F;lta{Pz;(L+0%WdygI{vBrg zNPq-i*90be6fG*hbv8VQv=CB7-&&Tbg=4ad)f^svwNoTXydh4^gkkqLqR2&W*G%84 z&8+BP)!rIxR|?(BX$YRS3Or)JtQ2Ac4By6R9|rmMxWWXjUSaJu*2d@latS0#jKPN$ zroXaDlK}OH;56Ar2A|eel%B>n{;oBy&yR+`yJbFDvlV)6RsDW=Z_bue6+vbVk(kwX zL)DIekdW}BQKiRH-7))N=)6CZX;^ZvvQk2PwNuk=RVRl9t1wezQ@PK{H(6!tA<`AP7j@gjZ4fVi%1^9Ejb8>Xz}&Nf2b`c zzI=jX8W31bPW#X#bwQWvPIhi8?rd0StlczuXJ=>ltXP@Rf?dt<9$}Y==>Kd|lTJL_ z%+>T|i6HepNhil~T$=Z|4Ab!smgfRT6v*FMVm)`mYLms!IY+9bFlE9pj(LefqK($3 zS0{BF?5cvGmsf=7(~Ni0Flhs>kfF0ZMQ^Rds=~|Sc&<&ptclqb3)9f$@yb>nrRp?6 zq|njg^rbR-j-}$^<9Z89&883vL^{=c8UN0;qlLvQZKfe^ z^U0@{;}dfU3$#P^m|Ma^j>dj8@ws;o%TZXcqVRt}SQrsgT<_96ThNHTCCHR!lFW88 zDhstI#e1@AQ-*wwgoX6c)sr-JmI@|8%*O+m*=kRc-`@PH?XgLx)1dh3>$Ns1{GS+J z$I)l4UWy)^Wm!j9L=yB^hb&%*d7nF(KJx6*V^xY~9?n~FA15DbE*u_p_++DDH8gu$ zsd&il0)0e@#SaC-g&VRP07W#0K?t?X+O&CE2nB`g;i{-ZVt` za4jiU?i}Gk@_!!0%+da}!&5f0)DHi$b-G7*Vygedw*%+Nb7iq=q(3m44-;v=0dI?y5B#d=HPxY_bTg}=4z~^3jy9RJI zM@lB6YaxW-AX+q>uwQ33+M(rnX)Ghh?4X+mX3y7`hKBBC>JH1)OxUBH^S)EGG|w-# zow)yk_~gmf7$-`_*r5mawZ@AK#+4OR4V~y5mIaoRd#)$OtvD+s)@H|7D`VS<8VPL= zDks+8Ue+A$-sSMj50=s2Dpiiwdy5LDRh1A{WF5;+dPCkv@(vh-W-J^pA8iYUe$a5P zB2`j+eA!@5-b(!hKEZC{Xr<6b!+^>e$7dyDuFMr3ClnXU%PIT*If6W#wPuq^(V^~#8P5j||xpSV&! 
z_KMr}3&rid>b2!@mvOIOzmD9dEhmQq$(wz#*~fggW)x>OE>7br%JB4E=;}jQT;D%X zn+(H!TY4o9tZ-4}Lj+0zOjY*l{CXdahq?KCt#6WWs{=H-TAo@7s>|^ANF~b+V+wJ& za4lon@t8DrCvK2-B%Buc^DcJ&+VZu$))?LmBUhf$GILpT+hh&9L zi-+A-$A4n<%*yo0`SmuI(5B$~gTcY_)8lMUg*NLAYd`UF%MQO#+%~Ws3Z;>|ehCvU zlR&B)rxs?740;cwQC|M5Thg8V>V>?Bf|ooNg5_CJo9l_~L1xc`8(Z%tAR(?TDrd)! zZKY$dfcjf~j+B4p3AI&|-ARM`r9D8~0HperP)=w|`OX08ZLw-!0_jYHvbFS~+4TWO z5(cjg)1QQLkIBfpGmS<~*Z4a8<+*;45D}R=IccRX$@7xqTx7n4eSr_VmaL+7=^JEW=PR*5lE6J)p5~S-9rs;9y zQNm2+(3y&QR1H>^RzEZ$Jph6Hs{Qo9irwGgrUL2`b~s}MFmEAK>!2>)^Wmq$r@a%t zmw)asSP&ErQr|=ki}d4A{K55egR=h8>!F>)85tQTkWC}{Q;wFj)WvBWs9dEd4Kt#7UhPpySVg{C#(@&bL+=MPDQULT@5V|M1mO`@ft z)?U#V^^yfm(rl&^Pj5Qy&(2L%YfnE`dZcck!#y&C)nS9<>~b{xk&o3}9JQs#`r+x( zc7;ppXU(UFUp}j@#NnHM!Fen=xKmB>ayK*OhQL!s zCpxx|F}2$iy0$JjEA|@a5MWVuxs7FGrd2oD-mLaIo0zJ2QgF?QHc_361PtODWyy)Kr7e z&RELXSrE8g%Mw6hhp1_P70|b?B7_FD32Fi+zIlgX?k*Y1_v{k^dk8iTEfEV#Bl3U+n3hn zlPBwE&wqNE=iLwu8NlrZ3krMX0y#YV4_e0vC13G=JlgRZ1BE@)o4HKgJ0+#ZKpO&t z`R$%g{n*^x&4{ac9n*Jb_QX+A33!I(ZtCo6(fPjbW_z-VOva!~%^>?Kihfj0NlEkC zwQC6~TnS^g1n&yZ7e)>2RGgvdUpvo0Hs$Z*zB^Fv-ck9nZh+NnzOTUL);^5H>FG?- z6mE+(Sz3{PI613(0TIgS&q;A4%1kG;=+`%T5!od9F zKcXkE)>vb2h{@K~k#R3UN1^57*9y21Hmq`Elz{T+QOmjB9B3PVInfxw5>y|5E{#Bd z01MYCs2ai_rCf1%CG1B$EO-O z9}Mqet$Vls-d0>DQcNuD4VG91!W>PU%AFrd5zSwZkT7VuyO&%HJf0FEaKpMi;nLcS zOW8E@P|E0WLR26gG`N4P{AdAb9lwpLYT%FM{f>)`&{ z5A?(Q`JEAmlus4GZIQER+t|h?BD2{`Sp~N z4o|($c8G=aPnXP3Qi4@Sjrhn?GXhlpjX_I(872)35U3A&AlNyF-H|j`a(3 zyZu9ZIxw|KQBkoSdI<~eZaSlG zn#0aCu1K}7+SP$D$^d%L2dyM|cv^_DUlZ@JmB+k#Wqss9t=$qKE{cXUIX%4-Ai+7} z9q*knMr~bPcE`ylh1WZi)m_~7=Zl8A!JhP2HnE@y%I_D~IuA=KtH35i(*)El5!2d-R?U$j{HWn4PY6HM*Ha98^I5E29F`}59+!*b zgXb&~-p>v%#d094eINgbQ< zE$;$xVahHRsAxV^OWjC(8$7-(9k zw7;+e@tpRB3s{5eX6x5$8q3tH7uP?hPb|<(Fu#tM51M%&K*7H_SWjKle+JB%++(UQ zu1z$El01IQICMYjU`@1x!y7s?7kLzI-##9NeT#55>zt>C_`G#@dwu@a5)5c|nrNgQ ztn|DB=zL+B8`5Rr0h8yi*StFoR?Y(<_X-LMrlhAU+jOV5O?MA@72~dX(da_J< zU`Ey4t)&7R@bNE;Y^kcc2ux(!gfW2BU znXL8)Cj512xEpB`u4C++4E8$XN$j|jZ$ea-(b4t<6%zqy$6DbP5W68E8 zERn$yw*T4*0#snzF9dg!e_g1HdG;(8Y`_czT`61Zi&^RErz5WN!h`M`R}YZY8tUbl z)*g;1GuP7APJw%Y-(6ig;>#zQ2j*WJ6aw0at=`?k0aejdRBRwA96yF8O~2#{{gYHO zHtv$2;;Ut}FG+D=h4Hce$|C_-Y&$mH1k~J9OCz<2x@zg@Sa@ft#YniUFCeT1eiL>! zwuW+QJyI#Ad@S3=Hq9hetIsdbxcFBU7V1D8t#;hsJ?2%lSC(xzBFTe$Ym2>RS@@4S*n6D@S}|4TNRzQKZIRGab8bjjZk6Km)B6|swZH!)*c zlAv?9Y`DghR3gu{83S3+t}3bN!8u~Tcv}Gmv}gX_r!pOzyz=)2h{{shiN#2Vn;@t}zWjDA9^d0dLI4uLPyQOzGb1y!jiGvGqzq}iQPVG~C zeBVDE87{+RYJH0}alWs!(|qUz60EJ^2*J(TMKZfBVmTet`DF{Q*O$u{E7Kr$IVB-s zoG2!Fu(FU^!$!vP9fwV%u| zEYZlS+)6!6L}%{j730nnEsKHTCQHu9=mHzO5>~Y%2e$ArHO~wTM&*y%j42AY+je(% z2g+$Xj82^|vobd~_ZPDkR|}jxXgg!MKpqz7b%00in;^=B|=s-z~eE}&B|L0jD`WOa_tW_Rp_ z?FL#uyin1TG#yEQcznqCX5ap|OIi@CnW$aW{m zX7Z7;#8FnLDlaz#%6K=>N9GDDD=Upgb_<`sTDQA)?G2l1WHP*`P2Ce(Ispsi{tj_8 z`-`@eUkkv%Iza&*jo(`J{;w~kz9FmHq2u>u#1FrfJw0ax#|5#=bZeYULvwRCDDd2~ zMF@;SBOQ7i~oi^RRM@VKm)FivW zc4dGav-z`T&@3mrz= zYpHcykVOX9=00aEwsN>EHa6o*(TwEYnB?5T*F=`}5Khe%7$^4S#L&*p-IBqD%wyQ{ z7co#b@WYoX@V>w9a%va$_I~~Wd#&kC-?+nc{DA32>Vxm z>wCP@OyY}mSj8O1ewNoN7ZOaJzq8i4GTnjX5|q@`{ocZHUQ`gTNMKU*+0VY&e-m#h zrYbMGQ8gv<1ueEk50lXRmLr;VQY5|bv zL)&?Rex8LDT--dc_8*^OA0y_E zkJk@owx?MF=C&K`63O|2ivEI)m>iKjfBEZ0_ zw*fjWouG9H$2KGE@Wm#3$v9Ymw9p74>PA;9*PzT-5JGjFq;mugEi7AJ7_KtrGohJeq&EfLKS z0I&q6)U30YeE?he<2pnITTn(XxU+5-L0UDmSR}Iza}q7vn2}tbAxIu*NdpgC+(@N{ z`A~3iW>;6&C&Li4ZUpa0OoSVnDh{;%;@7IN@Vkg+lv$scO0dgSwO7c3Dj}$wa z`_@PeP85vetfZ+QfWl5#6a86>mgo#x0gK*6Gprr8^)Xtsmq)W!zq|=<1uUucwc|y! zf5@x@yY&8LAj+HPYRxu;GonsY&$`p~{M!OtQ4S>r#Ly=;PxTBC3z_#TIK_kCI>;ux=%IVMGF|+FhQ_* zeCjhzPh8g3O#=AS20|GGcv9Xdf@}>E^)6(37vlKfs>B~|;--AYhfx? 
zF5DHes(naw>h$SO03qfv_H(a)o{O5U-oGgmE%zVR?mz#CYJ~$L$q>GQ9{jyQfqYk1}4R@B&137 z!#ros6u>ltBP87op&lSt}h2 zhXVytBzA+0G7;t?enIVmlbMFv{{!VEVMrN3#g0GxOam$=0{>ggq4ymJ$Sf173;e^{ z%#(z;zg!<`30y5>Wr3-A3pH=l8jne3Imz2$i=lOFp`l5Tde0Xn}*`_a| zY5@JDm6YhSt46j07_-u)YXXR<(oBC*76kA~fUY3mc0)tMO!*6klMq5UfWR85Hsby6 z2+_a$x4a_$jvb7lDfQm3gquy$p~qMjyq|`)b|_@Ki2%`6z!)_MBXuCvaFXE_!+wK^ zMhIod^zY5#-(HyX5WJRI>_=rXwBu|>uuc8o-nz2Y9i3__|R!`dP zvASvKY;Ff?1B_4OVLf^BzK3Nb2`09&6ofbg!#P~&2lcxZI6a0PwlkefKsyQ9MELGw z`my+zgP!}4E+L2cmlKSLhx~CmTQ3jw3=t)&$D_84XPq`1JA3b~+u@4|YzWcJp z_Gl!~WFj!UFI~dJ+{;d<@kK)%{(j1@=fS7{3a^`f$|+6XmLqby0!-C0Zb zR9Ds4kDwpr0h|G@@>qo-Yt{hIK|~ezu@Uo^RR8W1iUY|sqxPRk^!%1)=~eFgbwwfi2Po`3ZgYj5Np5d>HqAu1|?n5`ct&f#&ywgd9a8 zCt+b-5fPCb<=O!>xB3q$`O6?PDZ^6ytW0UpquUWEXGtDUs|k<(>Fof7wuX2K@phRe zRha-K0Dz{Bq@1t;H(iGy7jF?IY=JN`9H1A>&0Vj8!oofqL&L~MsI`6A7FB*+myPzS zzgvj^ZRGX$u&!>QpOY*ouzY^6&~a>K-336YBn?#L_mIQ+@$D0mk^}Zm$;fDb#H-e{ z{0T^cC~~9JbsdUt!a){%l4$^jSGdes-b4#fe;JH_d~!mfqD(zWzyeC;s5!r)Ck_J zD9i^F&4iF54w@xNCr3hq;;a45o1Fmq<-<6+`7e3$VcbR1g5XBuLr)RYu; zQ`5MYJSN;752}ib^=N5nb9`%*4-%-KCLy*T5%4>BH0YcBoCT!H!J2ZGB_k`VNNfT6 zmK5L~drc_M4q72r-vjPp677nI2c>QADs3z37i-i_gWTf@F2NazoR)lc&-^~CrB2gQXy4-sWY z4W4tJ^(qFsqy!>NT^uKWyn(9T9>vARr#_C7|Y*G*atl z(-6XGkZT?Q<#py|5262|$ntYjNl$?u#syD^qrV;sUXO$0T8P>SQrzUXZ{-kaw6aQ} zAI?fnem0%I4}4zBna&gs;Qc^7QOZrS!Gu5=f`tQg9;EUX60Zenn<}F{?tTh9j`s{< zt;fmXRgYCD2}eQs5+tPqfM4g;8SU%l=2J*16*5_*e7-u}G1(?@7vlftTvrz|)vsJh zdo66=XXD_ItD&h0sasNH?hXa=MMh7IR^X1C`wA{UQ6Vy^{D1^l&Q_9gX z?-Umo;=D2d*@y9=y3Wo8_yh#2LHip)U>J}(Sap$OOJf_x2R*W@E2aL}HEb|inRo;oW$JGJ4HIoisn3_xUzWtDqi-kVb~9Q3fRr zZz0>X4hkaP88l`){7+^7dnvz`7X-x^Z$e)lbkj8a1x_tyzBX@==hpgy5tLSx{sg(5 zw%2TlXn3Una_NK{P?DgRv~R={t76cE>mq>axZw*3Mq$;#2AD}7{RYhYO-PW1Qe2+c zl53s&xx)wS5R`QE5rI((9z$E|UqLvT#0N;yJ~@qi1=}zfPzB0F!KU2$+P*RcE@x4K z>>1mGrM!-i5mk>9eRP=dtmQNMVPX1>61~aWXAZ36qHd9bIuUDM{f8TaByQ7VZ*z0`&$}&2 zm6VitKqu8%&n+helG4}LpO;`e#_S`tfmptMI=%}BJe73g3OvGl_T3PgOiB&YBIl2) z)f_wpA>LfImz>-K`r)Ax$D{$fB4zdkB%*~N!X;dimG$kF`cI$8q+%Trr=kmvcKDibJnbOAh{?=-FPlwM57 z%F0SnLBYSYMBoJWHDuFH>&BFzZQAc7o-_hph$YYN3EeNdiu#LEkWY^V_h|7;e}$;j z5~6!i&#nB2#Pq4($LeR{R*E)INNd0KDAgNcfpu&EZgC>N^;Y8v#joG`{ii;#KSh_8 z{L%e*3qS$;hRi}!Y$&Kc%zv%eZu0=0Dx2LyAgdeA$AQmu>zui9EdryRhbP4+y==t( z{qO&EWGFpk`?x|aqJOD1?%!MvBj&4E+cBPN5injj6{69G*;Q-E{5{~39i(naNl8ie zb&=ei_>t5)L^+Gb-p&8u+oApB-zrIu9N}$&HFRTZ0q0+TbGp`R6e;i_j3Qe|e?mh; z6;)NcVO+AU--bLC=*r8>vtte&f@~lVK^%)|@BIC%zk3pV_q%0D$%54iN_P10{;H=0 zk^?!d&FSw#6I?5`$=x`pb3+tl4XDE_QRh_fFt7?r3I=6zlpX~yCNeP#04J5cY*XJ| zFUE7XVgVG$zxjp#b?wgFwf}G2IXZenb3(iB)mMW{{{dGPF7YqJUl$_JVtLw zTBk_QdGD1Y@Gfvotm^~m*S4#YlNAut29?G6OXR~rfd&Eu2N;t#2oEM-E7%?FNJ+5} z6DBorl%Bj8(3wK}Av-@;pyY5FRe2e)Tzt|B*>l0wcUTp`(s>jV6xK04*LwR44PX~C z)#Z~}h9z&1g5qpem5#A-G%^rhC7f3pPVnuvmwCu@^#gznQcchxv(y1)s$Uv}qK2e) z|C-m_pgY#PtCs;q2I^a1zSWCC_cEyF!eO;0TzYX9=$;6$4fG3b$i3GTF8HV?tx*O#elWe^~*k*Wgry@czM?7%qX8#wfi% ze1aSXzFJ#aX3LZTcH0AF0q5_dauJIyQDV9R7LBb??}db)<_+0xI>63|F&+7W+=sSz zy|&wSBKgxwZq=uQ<@lWqMEg#v7-3)(J>q3RJJA|FI6WF__PU1_HKneLU%P+;ln+g1 zwnLRht_uU|8XCbm>3VU%6taPu?o3CL+8<)9RE@pSbnU{ZHz>1iHx4h*CNXJ{I_HxR2(xwmBl zDJfXSX%q0af=~u5JLY}&uDIcWJNxsHK}KMEUu*D^qHRKr32g614~NdhUMP?u@C4-l zoon;`tHA!{5D?Hrl%Z<3zX}0@b1u9SM^kJxJNq9#mFXV5gZ$+jnp0@@Vi3^0sxq$X z&8I#?i@LV98PJZB2#Vb|xQ<}YP%5#891f{804?twT2A{PKmLzD)~LezdqeMIqDA*m z=K-+0NGOLt_bM+3#3$g=Joh#yx1iPN4Qyx%RLuZ5?14&$Ah@=t$3^&c6vEljmipf} zRQ4GXd7Yw?LCY*-%a9<$73Z8c6X|6H=ou`13lN|s1epAT>ngf?I0RIdPDvN!4k--!2i$z z=a>GBR}K4`LG?grZMtz*k1;*5A9#~*4kdFf55wzBfBg4N{$ov~FOg%+Jxz%JOG8mh zNOBqN9Pm~h@Tk-`Z!UmW=m1K3K2&-U?+T?Xmm_;nJmcWxyaZhDocA?`Wd8c7y1I*k 
zXV3P!P80(qBF+%BOWT@vLoFf{=htulmruCD;3`IR$m)F2OeGBx$c~M^|JVxmE;_>m zXHOX0)mQ4AR=GKGtO9yA>BL;p5o8B^8bA^K^u=cP?^`hgOM4>lIHcO`kkqdNg`bO^ zy}pvA9wc!U67F81i5t}a0~w4P*_gNFPY(TBG71+-6ENi-{W$T;uK!Bm%{I{C`RWDW zGo8KOaI|7`;x!U=&`$_wX=0VB?_Id+fqR$d>cU58CC!97-waZ%^uauI*8C%BhF;DE z0LBU>2{y!s$M5}m?caa;7mPfXhQ`>)C`tmxD~KiE+1qi4Gq3~>H_}6J@hRPPKUcij zx@rOxC-YK;58e`^q!-}RVNllwFYkMps2$Rp0*J5ga7j?OmHbvx(cQ;6#C;@L^P=bA zHHKgx!l^kL{3ZbvhB6;q7O~%ruyObIJRx(n0SNq+m}@3|U(@H$j~#Flfq4}~5Gm^1 z06=+P{w20*6Ft2Rj_N9i#1`tlpQAE4Ag1?`(nHMbV}4#-G&9RA1IsY)prYrZ4 zo~t|%_Z)=kYgPZ02p^g~d(IC2>xVVf8J4x}-BPdJQoys4hIS@|`3DPC++YGZir=X4 zMss9JW1})Fl;}T)L;yOGFEe}za}OldX1pO3==>w~kY}K6_;194)B}_yPI&SO+CP*{ zN1WUlqxU2Ov#T@U%tT?t5mH_T#M1ydW%#N9_ya1@;`)%dg3^*n>axLe=8<^bi>naL zIsU`;|K-3@ONi_?s3cHr?^2?c1K!j}$_+tZTS|FPp`CM$2As3rOV*hql(^WPy}ii@ zgE2o}3t?n}t2=8}M+jwXl^1L(?Lg8RM^dVIB&8AqjW$c_Zbaje|I+PWpHP-aN_8gG zoB{1k?@Pfmai58NAi)NXD)=^poFuV5Col*tLkl1G*ZS>kzz6|{JW_DjEeWmzODv`phzMoU3r2@r>Og4@sQjTA3Ue%U+(@kTJ5GDXO4AQ?Cx zK*t3>hBCxMMYRZ-RBa%}9NxQP!q6n(gEfphqyQlf!5ro=yn)TfFCY-(K^P5Sm$Br&EGfGU z&93$VZ)ISp;!Y9$HzZL+XvlEcoFz~?Ay+9X`yu~jC4v~y-&n*!0%lS7^q97R0k}(M zB1rlwW&EbA?Wr#8%=?&Apb76^`t<9a;S+8Ioc}X*M`pBt$czMy-+Fk1EnwOZaFEy_ zleGdc7ikQs++GlSN++m@5IW$L1cB}%a-0~e^tqN^-6JV>{JSuQ;}nq1paQ-EswygkA2fZ-Ydm8Z$QhG}qqzVYE zgg)0P?66Rm{WM$(&Bbp1|B+vh*WKKC24;lIAm!z{c=G>=Yxaj$6_I>BxYj4{4x-+|7A;LIvsLf|{kv z`Y!YVrDSI>$uq{#lSwiCz=7honYR$sbf+M8Kmx892~UKlVhJ^u=aSMEzmw{Je5a1M zgc-$#`_b&JRLw;{wsaJ_`}($OCrLieE~j)EcpAs-bjJ@ZYVr;jMH7p=DA=&)u^_QY zt2$;`PUUW#**tB&gyQkwTGQPjtBl(~I_sX%DDUSi>StZPWad4fSJqqhs-lu)({-iu zCUn8!Iw4Gf=FV?=EfmGzf{@77dA8dM%5NS^!>CoqjySkQ?lMd))&?35_{FSA`iHB}qC3{vKEP2`B!^kGzP*$_-D;0x*VKS0p_4+j zA>Yw#xJ3+?-{x}QIj-VGr_bZ6*}4>G`E4#5JgN{)Z{eIZYcMoB*%3N)8cGK9Hrrx5 zP`iN66i}3@CbP{a;9T2^hP-TChhV029(V347c^8Lu}CI#L=6#7v$3(&@sl~qGfm($ zPF|?qnw$eZxqF52HgimI@wwSIdb2SFMw#L3n7@USw=~AiHKQq2Zx7xK6(|bNd5ln*l|7*O&_j3#+=-Cx$eE zs{V!M{acLnwLvGg^EkJr!%x5lgu$6FrOM|AO#m?*ar^+aWC8;aLL4RFdBIrtpnSLQ z*TiCN7OvGIyG?_tU(~zqW&FR8GyW=R8!HTdH)(%u^7VCVrM`-8B~==WiKk5bsYR{X zxp76KN>7ubnY-4+b)UvB1^2%=9aQj2K8#)Nb|&@*efnHRm#@OL9yAzky#(729)4;D z+u@7V?J3F7+~>jTy3T83aj&VbSg7!u0 z+|l&r-Ng9hTGIy`RaLmv$5*~*YY?{cUk8nkgDoJvR=<(b*a8`E;g^wJgl7N%6P?h_ zaN|4kf@0;8juVYloBA|khRXj-;TK>iq>6t3=p)lYlh`x)_M-De9u?O4rSq=?w*`qZ zJX*hX_)^6zTrn9cvlm@DqP6(=MaxJq*9qxNx1g>M9cJg*8m!KXMska!XV29>#@g$v z2*U8*o_+Tcn)h@u%kZYw8JSQ%YSObc%vDADmE9Hvg=atY_a%2HUUbh8Ji*NrB#<0! z^}Rt=>9X}b%@l?reu^kXC8tjmj9DAlMFZTk%b#b&Z({9QuzuXvwXcMzDiZ>iA;HY! 
ztM<@kR1GLw1!CfUe7j@>JZH$sJ@W%;1t#oWN9_-YNB)daq_82%2DTmF`!zgq!Y@h^ zF($@h#X|{OTZaeJ++(kFt(H1u=?fpjO4Yu1Z{g(IlZpC+YaQQUi|R{8yK3<(4K8`6 z)mN@cie0zCS=)d0#eRsX7fCdyV4|y0?@Ww6&f>fzW&Q#UIicRWy>7Nm1G>&mJ(~=Y zZ$AV%PxL0dIbF1Mqc}7=IyJxZBr7``Cm&3VDY~Kxdf<6>JDi>#w;#zA<^k0kl3*|N z;KN*QS&PnwUgcJD9jc>OwEpLu37d9-~Xlv$mg$^0`W)L}Sy$+>i zynOM3)Tado#Xwh%L9>hiomfdhea=e(4p*W!qjcmE>b+EK8el)Jp{MIT4P)ctq%C%# z33*j=e_0YBf!rA5oNEuL&KM*B0}9Gg>)(ww&}tv(X6!$M5r15Wi^$a34sV{u6qB`Z zel3Im0wzZB9Bt%2zsM7 zeKExv8J7w_+BlD|7j<%Fc9q8XXX%_DJTnv8DN|CB+bJg2G1C}h=lo6W?%B6YgBy(c zOHe+A!HD3wZhzxwp@Lgx)uBG@1y_a4z%YnVs77}IkPr$P8s;9l4Hmg*+!mRhim$@Nt_4kP1nt=tpd3uJq5!5Kl{ZH z%v`KB1}ICf-rYja!~bOyVkwYpf|<<^KgtI$FyaFv?i6ffYJTa0u3>4$V^=y#g-@Zu zcYq(tmNZ?=xXzouzsntW?BopuA{4z+y)~7`Bc8j5dpKRHYKY6kCT~Y&owC^5h{3*K z>&E-(x5k_eEiFY@1rZJ<=`Ga^#8@yLiy+WXiV(uIppj9ywhiHg0y$3$LS%$T4j~kh zEPZvKq;Gm9s07!_m!9jPts%ROp6UJD3ppYh5Xd5r7w0q0Y%5wRq-V5lGc)5{nR_uT zjZSOQF^$g>XYD-g%*Z9(NG{{siDQ+NhJv%T5A3#2oTdai52~5UvW)x__Qt&6Rlkj0 z9r-;ev!};1oE7KVVx7WtMN+s|FR5w880#x4T`H6bB=(zrKlH_S)D3)>4U$P_j1CG6 z!QenkBq+Z%>=SrgAQH0yq?%&-a7-tyL+b*dCi zj&}DA`I;6(+2_i|-OS1Lb?cU5f;LNB{u5xZik@CtQax!~7SSS})%CP0b0QGSxhP~h zL#s;5syFt+_~hEl`mr3D9kz`YBtj@hir>_2W0Et9HMr)XMV;>@9b<>1^`fJzmh6V}z`jJU z;$=Z06W&HTR5;m2Khi3psiau}!a07kG(Mly?15;--)}YRakJO!kv|P2nySNHNAXT9ZbQyX*zk044oiQxFjFf?3xGPcssAfSO z6SnK6-a0PkqAVZ8P~f`_bU`{{yU2?{47)x15=)jOm1w5tzr>NHx)4~bZ#k~H;D)=U z;yo(JFHCL2Up6dp(bTmY=zeREWLM-C5m+9R1**Ys6l2dc zGKRGDzo{#wOUY5RQt1f|o9l_aCqBM9oe~nF#r1xXDrzG>bwl|u>KOu`aJn`|HDW_K zuqtb4Z>Lq4aZqzTXo_$F)alSvIvV?| zBqfyusg_Bvy7oq?KRhR%m3 z(N7&pigT<<>~hT;vT4(mwF_m80=&uyUS^pj#Y_6ErRR;5W0c3)0iE2gfGXE=UC%sn z?PoFZEBQyo-Y;fz-+M#hEvEhjdR*qbB?$Clc%Ts#N_9w3QkJm3I5e~n7#$tWozxL< z6pNmmH;XfG0r*`AXb3H%wpqxvcE;Yj&P{*RJ1>_zAE9-`r2Cl=vn}N#WbOdZb?_~= z8V5Gi{@p?m$c`Uwl5bMsVx*fn)S4r9-lkz-C!MGZO#>?W#??h?;{_cq=X{HJSFe`-HSd|6$UqZ zjG@8)hLO`m2OvNNFf!Z|&=*mlUni~+wnmAfO71GH8;W=gH$P|R)D8&X-#-MW(3>ofC;MLv+nUsyd`%cg_BB?r*=mJ{5SH^X)1)}CGBs7P$ZoKGGR8X-K#f_T zJa<=-bWXw5Uag4P!aQ_18A7ZP(YtlfWjTlp9Xj@1L=X+U|7}9F!vR|_`RHj>sbV#D zO}TMag>j{eQdp+;j7Oe)=$U{=Eg!WncrbDm+c(?ow`tc77b5}wQT&z5)r$oec2w}s z4rmAU+048qZ}s2PZ)j50_3(HYshR{+BaSvlaRQ^@212`0RtfB4yEWcG3FHy!V?sAu z;UZy+p@7ee>$6BJ3DU83{l<+n0AFoiFC&a1ais0q{~pi3D>`iD1*Af(5O?i*I2&m< z0Ob!KUW^j;He#`^ud-p={!1n-;UV69@GQFG)DbPB-^eJ(@)Vf7Tl>BzD za%Ry;yQNr^XqNV}v})qk#43z>sacQ7iE>sA1U+a&-~qa{zFkLzI)%`n5yoO)@$Gn^ zivW;r)Ij?p*rR33L$4_LF`E@dGlHcS;?n7Xvjg?5?tz;G@-W1wkSCO#y zMOA2iejZP{SmQP6Wz^@2&{$(gHOU&B|C=^`^}S^3cMIgii{ ziV?a2f|MbbpQU?K`{yklJ#-AQ5$h1A>>J;&I#C}2n$r&blAWT4rSGr*C2p~SAx9IrI zfa1do`7xsfj_JAovd@1Xz!EYvq=ogG?`7O@COrsN4GaucnjR>I#+1desRh2G*lw7= z1Q166hBpWPI}wmD3+zT#^}RYe_eB%U!aUUvRubdu&6n-$qRGj{|FBB2W3XT`yE+Cn zYTkX~R-jt|DG6cSLjAE77$pc1O(jaS%NXdrJ1d8l2*J*jSHHm8nPC1oUQAb$0tg){Mjk5yGM9R+9)b8I;(wudm+8N+kmc=_e(kPO_t`+zK|Ada^FHMK4xy11(2J=;g8p1W8WcA*ucR=9RU(d!k;7oVU9MI^MK4V!BY3k6 zZ^aw#Y}xwK;*|(f5MV6KhbKoU{vXQT0xHU_?;Dn(1tle=L`p!qBnG4uDMgS5X{Due z7(@g?P(ivB5S8v`h6X`GrE35I>4pJ@nRm~5p8H$R_r7c0x6fL0&f*+da&hgw|N9@m zP{*SPGn7J1=YsdY=U%{pvHAuDtY2Lj;ABhAx8>QOH~u5M5U9q120dckMednAC||~f z@S%PBMPb9QKf~{n=y^UJK6qw&)XsN82iK){YIM5pq&bh!>oTK)aXgeBRJeFJRcf%` z-M+c$ikPdWdwKL+RWTT*(A3kZ@WkrjP|`B4b#xOK88TfXlO2NM7(kqB+V=I(i_RPp z8qE`4{Pjq{*h@Xc023%DVj`#bGx2TelNkb@o8kpsp8=@$9=>1~aPMcsfJF8dfauLg z1pzcRP?9?$w&Mszf55J0V|!b2RlynUuNQp`ax?@I;I{AzSexVJsvs&D0v-y<<*T0o z)cq+?69m&`ysfUIG#Hc?rCuWSKV zWE}`Tg^rde06fEgy+;h}XyB~z+q(8tt+)5U{q5H`siQ0{kkU2fuUoBS@0r^={%~mN z7uk30HqU4LqTF1nxZ+atFN6I)fu}z*h&`oSqc{KC=Rpr}7 zb8NP(&P7{E1D15#s%1CpTIv(#_Nks<5t_ZwTdrisM_lYqkT=17WQ0yyn9kbTb#6ionHMw@NCVkF>(a)Wu#ri@jB7BvA))I*4?tE-c%E3ta$7Jxt8 
zxpPMu5W^BqxU1*ZlF6dLU-}ncqCkaQUB6V+)OB3>^kn%L^&0op{czr z+3`Btbz(qR^_IvG!zdB|VB>dIt?grYbA{LJlkJ_(K6E=Wfl<%@Q78hb>zR2wFxZ9i z*@H!ol>_g7mh1>?ZTi4XyO4V_T7_oiwRPF@_R{?T7bkiodx%rhTKiLg@ZN0#O?RJrh!EkGrrajOsOe~`oGkZ zCp}RlUlFFB@0rj@G@cv$Rn=%HhGC?{!jG+7R#jftu!bok0*W3rke=yKXO?5 zn(b%Ba|n!njg{rTp+U^KBTIQ{9&x#e!CQq;4QRB==DZk@!0m=xb%V2|mB-oZI6se8 z#c!({g!k)$kDJDqyO9;7h&iKxvUUCjK0YE*9zlXa#^r^hFJGb#&&)}%lS#T6{LWp= zZCdnM&bL)HMIO*$jK#&sV+!)DLoIJr=wOc1KhOP|gzrlUqxaMlXB8j5X6Uobc1b-~ zgd-4_7{&J*r5|i?jDLgCpNJ?<%^5g+)2kSPYzg;xxMsddwbuN&^S%7(My>Mmg-PFE zCNpBABH5dSZKS6ZOuwzn6fUrRpuSi^=P_4B-=CTBiaFtd=hq5idEH+NJ0}iTD^VAu z$LP=|{eDf&rl)4XJhRUq7OUo_hJDZ0*OLiskf-@+8mDY4@M)#P_I@i--lyxgR-;SD zLIM@pzvq6giAF|&?GF7|$6X`dQW~-0MZR>7C%VgeE)W4|(@Rh-qP`SlwE+Nv7z~Sv z+C<#?>-E8}m8dWI9TZ2SEvzp{_e5~AA~;*55V`u}L5340%I=;*cyIk3?Y_<044&B8 zJ zIMeH8cdAM@gsIPWZ0bDOJqwv_^8lMzq*i2-5hGua-xzz^RNE;&kzM4}n$-S&{Ly6C z{`Z+XdX%US%&T?WBE%BBd7M&WPCk%Jyl2T{RW@Gtg=ZCvUyqKuJXT zU^rvwRPT{S(0Rkqoui4%jo2Ob()j+QY-?t;z|P#`R+0q%9kYO`xQ7#31ebVsYCp6T zlM=*SwGN3G6KN|~n=(e6$8YY|49rqby7<6T<;eY!MA!g>2lqwg*!XUwmA9XI^@KCV z8FLbvJV0&3;#l0zQa7J1P07W}-}625GxRrH6)Ph1a0#MW+f=c-$8-BIKP8F%xw^s6 zHhVc=U+TGGOTn28>v4gVeMy2R}qwoN&zU z1X@pGK5M@|eAcr6&q48jMor}BYn*V{!%N-cPbrr~whHcPc^vrD3>-Rq6bcj%-xY{@ zt=Dovwav+R;38dvKydeI?pvl6%~<=A?Tu9*h-my-h!WA}Rhd_tn-+>rPqtN(dzUbo zZT`qSbX^V=c`K7&Q*Ptfd%D=&Ru0>hVr};Is^;QKm?PN*w#B}XXM8(msRFyaJ7z~G znvC)7Y`i-XQDI|#rp=Dp-{u0Vz7b(HGD_B!10P(7|K8i-%3H73qg&;I4jr5A8r6L` zT4Q5-pUo1gzeI$6STl0kuGC2q97WY(YNB+d1raX_uvh1T%s-jp{q?#(f6Ms(<76B2CY;il8(#d;;;V(iVuAvZ)V z^SyYklS=bFM=0y9hWj&<&sR-6{rTsN7*gIvh6}99J~272kfN_iz#91aa)y@?73hk8&%wDEHo}8cT9rUaWlZ z_H9<2Q_z4GiYj6Lp@h0M5g9ma1UcIZSubDW^r}VIX#-W?TrhZ;LB)1^ynD*lky}VXjk-EFC%5NMa*u9InE!G@85@dHJEV zPfkte=`(osvFwHBP)Z31+jk$QFY+V1Slm&2?B0t`E&-J%g@PA#$xv#)Qd|&FS zZGjklNKl}F+3^Cp*MMIknCOuF#7WO8iXS%AVVBUz6B9WTku!UbYqpOcI`R9={^SYo zyKd7Ow581Q)iYzdg9M$6j!7YID>`IQqtVy|!(x8v040IZ6+3i&Qb6eZZ+gC+M^b*4 zi8#7V1^ZAEVh^az!by1r@59s%UAbA>$ti56u2KpT(NQtz=Pkh<+EdIhWPKa+*6TI@ z4fj?7XU#P74XVLGl@{_nN)0mObi$-_MDfg;lZzh&=ghm~XL7zsXCvAUAzY5tyIn$# zVZIxR7LHL$N5&o&;hK8)SDwmyOhT@_5}tx-*4i)BHWZ{2loa_1&q4Gb+zo@{?&g z!^B0%7hg7sQx4GpSPU_HZ0!iK(K4Scfy53KHJU2Jy^W^P!CdRYPl;j+5GJfS!{>bC zTzgIphVQ@;blrlc`ZhilE)FY>FhuTFq!BLS62WCl`dg+fLl3MX$y*uv?{0P^PHStY z$O5WqZPT+_`DLO83^-rHglv6gx zFfY`Z0t%r=r{+(&r_QH7Mp&EEvZtlHnxGbkZTY6OeM=>1vMXw#yx%$w!un_jwNH5r zg~v@E#Jd)-2oW|>og|%{UrxX!5B7V140;}Y@_ln!N4K`ZK!`iI$$l@M>eS=6_Vc@& zm&W~Ch`Vo?(nyjkSr=aLpR$JFKK6cbdb)nQ;O?$U!hUKsHa7Y&pLg6qOD~!Vg{?Q~ zsis74T5wU^h=xd@{rnDdX2l0#mJeePX-JMTa>c<@zo|s}X*uEV74s4X(4nW0(1b(5 z;mL2I72Dr=aS}mTf~i3npAQYiqtDz>{Pv`7fd zMmSjxlbW-c(eY+EaJYu=Q=zT2%ohiF#)&M5+Z1-_qlauP#j%owexniBr)@DmT%`z- zYMbi_wmH74m5}2KKcynG{ZvpQ?rldOSwKek;^&BKiu19s)KPuXVYZLQ`{$<=f}Gb) zae+l{dy^pz?4-paQoeBn)RbvVVKeQe-DR9ZB0GN7G9Plku%zBU?4gjp2D{C$SD;&S zfdN5;Eo0J@dO)NR2~PRj`Kznd!N3g%4e(~jZZH`S+rt7h@GelcvIV-~WF*3$#~?lb z)NlVYY54bFE9cRGn(GNS3(2`B1abcJ$7Eb2Z$g}2vhDV+*PnZRn%2`-`F0E_e=>v; zkzB+i&N%Ab>6(0TZn6=iy&>Yh(LvLWQC>d#7@ySQS#b= z(4O(9c|@7TjU7IlnFVv9l*DkBzUy`9LRK^hc@E$yp_C{qCL*6(sW0M)|6RY-&R?i$q8F-iIX0&W6TFycCU-?Av*w-hkoZQo znR3`%F^k9NJOWZ-{rS3)`!*&UW5!81L$8WrrR=_EuyLbnvaaXXvt|1*gN9_EOdUeA ziPe9l^5fpyqo9u|L+*z#WwXBwT3Jd%am4(*@_pPLPHDwfs{Y(UQ)9n4UU1od;?;$6 zT@>zH#Wte6^y-ACQqe%|ktH=+Pag%fv@Im6`tS`xkz~x-7E)O;{grTdblZmFvaJ2I zmk^S(dreE?=y&CcH`~#bvE0uUS5}G!84?}~QU!Oy?)E_x>zYF^M zhV83zYo1Ggrrf1gWpMmuDRU?JA>3*p(2&97hMoon1|ma%PMJSY3iJS->k z?P2jnBADk+^`2c^dp&eF$1NHuN{jmX_%x_Z6k;^|Kg+&<^R?=2@RQzHKja!y4YT^> zkeLtObpZJl)$iTa;?*X9q9N4ll5&k6q^7fj01uMW5;f33_M4nZ-Au%(UEk-qr>?n3D@kP(Vvr79j5`xzRU!^`=o{Q2 
z3N=&bY|lH>1-S^%R8y@L3T79Ew6cTXWTbBj*R-j8E?|t=|5@(h!0=L?E8^M< z@vidm9Z5e^5|W?u>^oeOmUdF%36^%EBcP!sh&e7Fi_zzgn4Pus_VxYP+{^|Pc6$2y zRE>-n@VX^(7IaImh=8Z{@ULIL!OjLmF~Cz(0n%lC4LIPc^B72Ar2saP+1q=xTUW%^ zB0xLIo~UW?DW;_jV+G`otpiLuGHcgXid*ytpImYRCH#(S*0_a9%;(stXfX1`ezdkAd`xwQujoQ}yQyrc9yZ(l8hAo%@I(8-OLENS%{Qp;s!}O$Gs75*n|hy=-yu6Kn@~g7 zYQKX%nv2$;TPR4`X+f6M&l%DB4^?bFN~5;29DAj@qJygKKct!xBf0T#thQeJxYW!D z=gK?TZ5rf7cU}{&ax;n1&N%6uvmx;IIRw$WXGs#uw^MN7h(f$W4pN@8&2jI*TsJTK z95-NvtHR(@4tBZylcqKk$ML49&ie_6Uhg11)mMo5cl2c7h^hu+K0(v6&MrGdu@JSS z!&Q0L$6ClN(hsw43FtZ}<&*hE#Gt#0)v@ z&%q1|Zv^iT7=^$aZ980LwA3n|(bAs);H6W5{tG0q`&W^^qdIDt%-hGOKCA)=3wThC zuibCWDP~DZ`E!&mXyF(0ubuE8s$}F49cZK+Xd5mZQ|}OAZzcEuK!@k&TM;3KN<{A} z-XsP zLqo~;isjk`pkIfOofScluD-SM=v8%KrvimCA*{N(u+@l^43rYC=Ctn?Fd+T$ta$by znDQ%&eA&8pb6*d_JCKAH!w_r(!97ay!OpZ-r=%uCC2ujg7ea`vXS!aFMk2OusyaK% z1RII)V4``Oq1GR>>e-RBk>qDy1`%awDXa)TmO)I zZJRr7UCj>9&=})0lj^fI-P}?PC&TX*>yu5dw@e&@aA@#LLt#cZ7awS4^t_Gx0=akZ zo+t1V`p4`KC^b!=={|cVp5?zl&bJfK0i7-WjSTptgRhlKUMN53A^lufs0#36pp@D) z3cstT*9{b6-T*NrgqS&nng#dY)lZ@y!Dfx;cQe$0=mG%-4Z!3k$1KS{TyKXr$P&?t zze#yFfm>>Lp!jmhuKpb|+d2G1u1^#OT1TaEV`=?!aHWI_25qe_Ti*E<`wn?H?jBt~ zuAy+cqK;5P8-?f-A1qBtc#TgALMj-7Hn8W8>2I@8eyJ#d9caZ6QK(>}O&CaJg?8W+ z1IhPT-1d7NyB}<{!0}D3Xp+JiY8$Ux$zy$0bwTtV6V zoA5prXr@HQB7xs*D=G)z(HqB-0crU$@Up5~FKP4uepU^E^z@Q;b%t;LVh#bq4w9N0 zgcKrb!;V85v@WVtMO|I);yR)1uM{$vIScK(AJ=!RI0o+f9D;l2>P1Wv4-Pc}hd{Ma zdi5G`L@#*bd^;SXaOn4rqv_u{eMne65-}mZV}PnvJNYAZYk8>Mv7!+dj)Y3d)2t`O zwqv4uKUWnU^VBdV)n2>R_0lkj2Z5~E@p9hYP;vDth+Y*Qe>{Qv`7YepUOf0|5!GSR zdz@gx>D-MX75Mw(g-zZlW24iYiS_ijQPlV5CH~sjL!G1v*fUdVFNwun&55=j(4j)m7t|%e}4wbm-g9WL$HOsgmvQyIbaR{KYsiIqpHi zL5!<9{JFg_|5ulG7BlrQlcTB0#``~~;e)O(!Yz0gNQ>r~cl%GX;hhFQ%tz0R*4szh z4P`?ICY|4sFBWfHse!cC9mrB#bn51k#~R*eODisF`o+0y!r`8s^<-jC_z%*E2;KJ0 zml<6=t@(+^svikkxJ`nUUiV6j_KEqFZ<|^yJ!B2P(BEQZS4d3H{4!4#QLKiNNu6@H zKh2zm4g5a0DX|+GTKyf89Rztf^ULtLB1AegpxAjv80elyI|J?% zQC{+^O2JIPv!orgY2pD3juoHDNI5fvqT4wZQXRA2BB8vFFK&TE=--Zy|GRCM4hs5V zMJ1YvhE`%MJssMBXXoWf6vz}x%`zxN2yt+UIeS#C-KH_a z1wPBHHjj7UDo!sarglJwx6<8AP9=(Gwfh!ebye&yU#)xQe7vGQ;_4sV>t9r@9AVt& z;=U$%(e-_J!h7MHZ+M0$#r$k8?g`0;4W{3sAR7H3H7 zMwGI)lt-zUIr|mflHKjJ~d6bPch%dAyn{&G+$0Wj`( zQ-H**|Kg=&;l{c;CIFW11o#rqLB(ThK!RlmTjijT{iR7m4d=S949_kM(nrC-?Ih|t zDam;C0MK<8fPb`?)Nb>=^uN2P!pLG7@bRecPV5KYV9=z^{n&&7N9ZBgQ`fIGeq<--J5sos#rT&J9y2C_!0k z(~;Q@V^!YXxjh|++`cVC;!6@!=e7*7Z)g~wpMLUnksArCr849m=s4q9eCJ`7MSSUZ zzR3}mpYD&UQ^gtURNxyId;9Ao!?xEH;B&bTp6*eo=ySlk1#oBsHaw3y8`=>yK(mGzc3 z$`N5;Tst#o3#Yi`YBBB*43#yG);3c|5iClKLD(F|UlA4vSz8wn>m{0AxIjq7pnJD^ zgi8F4+or?JlJ7=SaSrmdM~P|Ci-7n{ONgnp14~<&z>IdChk!f>do{B6tZanimM|eK zsGJ-l0T%1*&DZm5qF3={6zNEhNM(6B4WLq^H!25rPS;Bp0XAmq7)*u3-faSn^85Pw z7qSN#PegyI;&&5|zOk{t7$MFYg7|&T)mhy3m$RkFFc68j!aiRsi3eY-FJ94oWPsXU zU-zmAuyLW>^P<(!AC!!qj(-(-ncigdL@|l0rJ=qLJg=}kCM2mjw8{Qw0TR(zMUzc$ zWX9`f{f^Dd&SZ1DN3VM#O`7i&N2SoI5O-(Zo_9iT^58e*UTSI*4T5cicNZ2~LC`oT zxw!S@OVVBFL`5gajh^XX9Y3KN=>9AgTTl)3s=@7xORM|O`Lr$PT*O1o^f=h7YJ@W0 z*ro2-4o?oh;N~sJE_&KI(RDMnHZ1(*iXB(y?_=J+2t;KSIcwaQDKCclN3kM>5Yyv{ zDoMIsTb*Dk&dG=%UvWEy)7xBpquJ#ZN5=9;nXWaqynk5Bn3F7E4sMX;`=4ZJ=2l{! 
zJqvEM|F!*RO{MVeNwcr(yOH51KAU3XxbYL-hiSh#oZn$HN5|?H6N*D>UcnEGr#fH` zi|*o&v!Q3j3PH-r#MB#=8(7x(huq&w%x;O%5c|$OoEl`BJLg^GXfC^fJm~iq%6b#U zTz|KRBiqR)R5G|G=&s;;F>XYMjUj5tNsvS|C5N%tqS-oLm>f~#0dgOZ2I+w`h=v=a zK^^*<57mKYfpyn8Ff;sRj{=-*RWmbIOaFOd%oO6Bl~*q-7Wi$(a4+&*{AUp5u}MTi zhj#p)wiNz6E9A)Jc<#}dWm*aSe@b}%J0pV6<102ab75p3m~|p3??jwGEvfIFz0aGh z$=!V&Bgfmpmnu4IJ+auQy&xhT-5kBT<;t4P5;KuR4*mZ7AS~YWo7n)|Q+)~$5EtI(av~Q62raU>WO(~ik|MB`Z9Y$}g#`Y8OW$nQ@ zztn^0Mq9nF95SjG*?7xI&&6);OizGE^HPloW^2x4~Tg-`htN60Rj(fS# z*>q#E>)oW2>{glrVN_Vi7l|w}-j$V97bp++ED_e&*$>$(APkZ$8NY^DoS5X=CeF98 zFVV`WkmPI1kBA0 zQOKOI8wtmP8x8N)j4}$H{R)Zw1xb45{0!uSac}YHse+^!BpDuA5?;pr`6~e1T~+WO zvPZ#yun17Pc%{r4mTVc=OO64Grcp652~Mmkwk0J|$U9&T0DX{W82@+o4pJZg{SU7j z4oHFK3+(&nw!Pc?9TuN_vwK%Pc_;b0&ny`HlbL7Lju;Z=1%kX5Pw2B`dX*+i+*da= zLY!qny!Z|bSPB$oAE&Mal+08#)%v{vbECwBu>g_2ZSlJ;QxRtxg6Yib2|B(j&6F3d zMF}d0_v;wV-#l$4C~EB6dQvMbm3rOX4bdo~G!EmK8n>ZURL5Gz$XF{wwrvr{R)y6C zUz!;ejhyIDQI!ogE_zZYQ0~@v*@WM=o5#{)N0#f47}ur(sVmm&E91S_PgJYU-#k6H zMqPN$#^jC6fjiLc291kOEeP#|-91ki_RkraoO441RBww;mc)As(#p1sj@3{EZ&o?e$?=+{KPb`>IP$8kez9om zZ2Qn%yRpdK{0i!}jJf=wUJ7`P9Lt2Cmt&AU_}WuEgS$8h$g@}jdp>-XqlCkFq0d8O zS!D+7QMgW#P&l9{+0VsSsXT(ISx4aE2V6s6HU17$VBLfNe(}T`e@sD2j4*mIACk!j zm!MFDNheQ}FVX)H$nwgq)2&yS?C{n?CC)eMXs>hyv{CLW=*?%Y@Q=u!%-mcKK;)PE ztrRP>7ntil$vnQl=XfGCCO;53!9YT}qfolEHEB0?QpD9Xrdu!lz_>PV-_6#9pm6Z8 zGp=0N(&|(jRnRryCKd!bua!U{sj%-`&hJ^b^N;*`0kfAIeao!{KjD-F(+Nluj{~gI ziEsG7Qz~5)tLa;(nA()BfEqn&B4*lQN$S6qF5g=}Sl#~0d-G_r-QE*kxxT~H4R>{}ZK)#}7mFiy9tEl+(tK|%83;^LsEMCx!~Pe-RZrzU0IJ?beL1q7qe76MET&<8*D%7IL=%UlZ^AodxT zSrg#p7EztFNw31udt{nE`QC0Mh>gtit z{34oyqy=bP(SdMvwRx$U^^X>BrlIJ?L*=Fe#9`$IvlcH-ye zyYFWsjo;;0*;clbv-d+e{9iZrTsD8!FQ^?qe-Dh4&~cfjHzC}2@Z+SmxUjEH)>IC2 zFwm_l^IFl>ugR2gXE!P_j|R2T+-5n*TZYVhREX42Mqnqy1V0z*{)d|SUsp^z7bhI& z@Mm?F6#$)Zk!3HIbh6MosngN3$>eieP3J#hGx@30iUgSxn*lQ09Q?U3ztWj8tA@ge zv49n_*T-H178Mi3Tj&>OO+RNtg1!CBL(`pqy_npXwlA=_(r-C8=MYqt9dM~{#lliC zxoW9Rr0A#p=cVnY?c4K~bN*R;#N_KI7f$J;L&rVr0?l)j~YdbrGU`#_(-+6Hk@ zCU)PO`hD2fXcSKaCSI@z%De_HNIQ5%xShS%RX-XV8m4bt0wmTBKq0xgmZzeR567{30?p!){@{dW9@L)9R_!;15~E$ZlU z(4qA%93>&I@>(L+nPTrW^6bQuIT&7p%uK+t8G?*0c0tW77AmpOr z=*S0@r(R~CZ#eSv^CKh3>3|-!M$O}|pw-H01Hwam%7=%Vf0adA_u~VWLuBBA_d^Ko z4>Luz8a@ojf8)l+qF7Cb2MSQH=zs&^n~aPNA}q)G zAI%hn0C6AF38;l&3R2gQS8$qvz zp8VbJBA{Wqk1v(zYj1Fala@w`uf;{R@F>ZA!d!=Fi_sb8uNf3T@;5A-lzrWaxyDJ{Okicf={$q*fK$9stycw5ue%) zsX-sB4OneF2YQDK05zhbp+N$4hr@w8#{94*hdMwKVL}l^Q|s`r&YFMT*nk#R-B8&s}f4krxThK(5^;)I`oZl#*Gid{g z^t#13d1^*RKf$#8Ti_L5i&ECnxdePOGdf^kOkwBXa7|E<0*KwH0L~B*mW;*O2kh{_ zK=NIC0iNRybFb*yOCsz&(wK!%z0Xw5yt~d|dH?XCBm%O7swlB+hX#I2cVu`>H=T_EL9M;)Rd7AyIqkKdnZ2A+~X0oz)j=Sk$eo$n142WGzSH zwtkb}$!NMD=6TomrXAy-Ua=xNRS2$Z!EpW>wE#Ym_;%;AI}~{C#p6>pJkA0Db%uwB z;rKBG5a95if59jUbg!@T@yYl4`kQ_Hf&b&$A0~{_$`0yxYew0zJ{%#M11ta3k2Q2cLw( ztRT1x58BbWMA*j>g&);9pW%ajM)}o&Ecz=~=J8Yyp!5z0@de-l7%CmUx3yD2WBIcD z+j=pco}Nb&V@5eY{+Ea}+bFR0#U#gb^9yOwXT;b6;OHZKQ_b6v4LkUC$^W26E=mjz zYVNl!P*dK*NwGE}Fv5C6jEEz!`!X3;0 z0-^tzbnfbnHY=qwS|57!J_ERC-inoJfw?37lM!ynV;ykFjH%|zWLgReNHa4t(Lh0I zcRAhN2AHwQhGJwL`cvjBw&wuB+PEcH4!}i)Jw}v{pZ?{T;$JD17ytv}m{@m)hb+i3 z$rPAs^vZib+wbot7{zzaGCCAky0l!W;cu@a$0C*bDh4SrbMy@J?+0r?T0(GlE`tE~ zHotgMjnf4mIj26bzK!(r_Rawr9^k)q0=WyBtFj&!03p-3!j3#Ec<=7C&zOaAl@kr{ ziq*_h{Q6HK@b~*%gB6_9TvxR&<)G_3_fQ;!Kg}(g2*9Q7eqmC7?jt2eRp;SSw6YqW zC-k=cmR;t@CP0k{aG9tehT+cUJb@NhRdqEy_1G$SSDkNLFZ0%wR%?%*wTCMI)p!5f ze^cv%ZT0e}llmPq9`GFTZ@v_{QHp_QQ>8r6c$I*JasTp;CySK>Pu4yC#_G|_!mnRF zFlbMg={g!n$T;`rD zXmUjj&Wb7iB`PKgSqwTsIHPzZQ&F~6j;5w&ZcPo#72X4SX`=h%iLX-X=&xV@G?~14 zx{kZ}FD(F)rMBelx3BoVU7z^xqxVo#7#pxOV_a!h6RjPJ4pv$ZMFdlSIe&yuvlfK} 
zyZ%|H;G$palzE}(r;oXK0mtE~@y%; zAMo2vswp`+;a1FOQf<<6eh4GJ*o|5kJqIsvwEWTcYt$O}bsn~UZEYa1WAITJV!2?<# zmf8)09Nz!CsLt6Bf-Y4hK}^K57AhIZLB4Ms@~=^XPclCo*YI|ggoY^wJYIa~eH>QF zOPxfk3K(y;fix}xhz_#=eiuI3b8Fm`F0yQ9iHBlkfnqEIA!_jtgnUdTr>2Ghw2$$T z1PsrYjm~=p2D*w&D)sgCtqU=psnCC#f`2irx+WS39+pGVR?NZl8XVjnDXAN4e|+#) zyG32(iVYamw;SfaQjP%shvPBfEw!0dKz{+bhwt|6Ymlna$@^{~qv`lWebycpnO0u} zVv$f_m8g`Lmq&U3^^Jd87N_KiNl6Q!!#&>MBiMr3yQiHd*;|Y$2Zqqc4cu@miN8y+ z)U2!U_vEC89c7g!D9Ok$?`XS_367NB<)tDK0MxemLLJ?=V)a3ZKEA){WT%^_Rd@OV zyoI&Sk#g=D97aji(r^o(5*JJ8{0em3A@Nl^@v z27#`dmf_G4~LT@)B48%fyUBMFGIr3CM8(hz{PEtL~xf`9#bXG70ifr2b27Z(9wv$p}( zjhFr>9WAZ!!4=jGUpD+@31{W?;DO`vlP%jS2bBtO40Vqy8vgQ}TG!#X{>_RT$FIOq zqI*&K8@SCW z4fjRRVM<3S+)pL4UIZl$#(!q&J8-sXxQ;D++vuSS!AU9q-~n3#kVHsHxIKW`4qjdO zpN=2cLEhfpbwcCu3VTs~;fg>5AANq*ve9fpWfT?n4^Q_JHIp_f8z|8SZN5dq2$g+e z{w^g(mf<0D8&QU>OW^mKYeMQpTJUbl)Jo=-mlOCLR{vRDt>;%4V8$&~>LOke6Y%yT zI9Ia;3Oi;$8aQ1drcRe;SAvzz^>mLsO54dm4rBqW=0CZ(?G!upCXS}*i(Q+ZMkAw#G-+oN(s$5T4E6cJn_^t z1#{dYB2`DCkSX|*T>Db(Ey2GkTxe84PNha^>+f?Lg8NR6`Q6=IT}&(`HS)RS%WXr$ z{61~=>^ER+cHC_8t=+lnbG7ZOuzQ(QlG3#~Uq3Nqh79j?enM^pwR`vkWM?oDB4KH! zQ$ypEua?KJR&_YEbC6B43&OL5OKR@^Uv;@P(Pegps4>f+OZdD9P}jwP?1tzWHspBb zW#d)@1#s4SKJ&>IfAl?m;@`OB?JIv1dhs2KHvAS3rEp!m&xuBfeq<1C&MWbAglni%fNte zAUV@(;y67$oraO)Wl{fCA_z7#m}WqKFu#X)XlCYl+<@+ZsZ4G#f!AJU8cZj1+4;^3 z;_Aey&;~x@{F?60PHt)G`8MaiV^WB@f%;qjx1glGYIt~;C^7?N$XuE%N5n@HD+7R? zfT~5D3*$|RTJsG79{6Tf=;;$7vzq?%44|xdbpDVO+@%_P__Q0QhYj}9 zwT-N@VIxCmi8Sr2e@+Qo_ptOf(Goh+(ocVg!prRK^6M3 zwwBSt!U7G{Yj%K8-`qGb1`fHr*^Vb(v{8sy1M3Amo|X7#XLJ(&=uuLD->5zOkmKUg zdWfDL@K;&~#*gck>nEvUtX9XRRl6*_H}2@_YS`F(BsS_cU4SFs-$bl=j{`O-@q zQ?3JwTkAJWeU9}-ea>Ts6pi0{^`l@0t6tll!FFtx6~x$cU=2!KT(e*n6x3KK$ivfl z`1){va=0M?gKqh*1oGc{B@c7bsgJq2pwl>$(%CJ+uQZBXruV)p=a#fKJuDy-qgL}bcu|?rk7qZUQG#iFKwh8UtLfDy>Fl6pkqo#iBhUr^T+Aq zR0E_XUps1oyyi6~MRyMKK743ZM2zz(#ZTxd&DG)i82=7E1;2wIVZPJ!OFVjQrLcQX z*&c~{`IdD_zseUQGn<3}rG38nd-xn;K0A+3aOMG>c0h}v)7qZF}%Pu8WP58x4 z;JL(rML5=-PE$*o#PJ0hv|sh<6AAExtB;V#mdGdoI5*%E5xJ< zVB7n2yz4U~wX!E{ccF8uI(*QU7&c30g2zbxn|Ss=>Zz0bL=Y!>kT4aLyQ{0Gf`mkc z=#vHd<0TV*??aIxZo3;_Sg-9+5p67Wy7J_A>Z0Z>2{+uMeD{=iLdh|>u#F#tpaNg| zFbBd12@i7cSz;i#m|r>evlSHc7-w*RrtYzN z^jzasAp6P0bLLji+V#Q!)h#MCyV?_MR?t=}cf7)anwLX6Vz_^X{=g*E-abDFnu{E) z_@tA*y!HVz;6-UOUhT{XZiqkP@1Q^~Jm**cI+MPrsmyjIYyLSPgZ?;#Ka-woluMf3 zs?PqXl)naSyXEIIR4!7sF)@|NdGt4*b)9lU`6PPYXVl*w2J!%zM0#A=O}yGEVZZ=z zrWc(dlhJNBI3f)GXk{61OmLIrk;hdgqJ7ZwUj*!9Z#X?A1f|$s9WW?G^2I+y)&q3` zD0-|+B6Hq@GcfCcyma55LI1%wN=(77t1H+G>s&%` zT%gmcDGgI45!zG`4(q>+gL-2?(L7$`n%>ui0i5~z`f||dMC6v91>wc+05Rl(?lf00JG{9xq?~ zh#WkYC*czfbFHGlu(7e-Ot{4?P7PNfqwKe?*&iP_HX9N9bE*TMT4W<5n*K%tTr_;( z7wyV{oUqK6-RA4Fp8k$us~ARpm6Obj6S?RN!5cTs7X?}q=7BaxW+D`%ciovQsUa}n zCg2YqPT$gUYx!z-MC+S)#Um!XLEp|Pkb8Z6?y>|h*7>dJg61dTt)wDNJCLo0^_#Wf z7qW3O6`liP9>0NZP8S$JEo8}T{4~&0T*~STp1r^JAN4)nYJ>FYdi)muCM#k*0ST>Q zM)y`h32}C3ZBhf#2D#qo5wL*1kYFVMIpo=~;&UU%jFD?6#~{Z+|HuT)1-v#$k0vRY z@|})Ah1a0nLf^=5c0Vug^U07Z6&D}h3hfs!Yvxlh004OH@C(FPprXG3R0NS=w6j=| z!$?mrJA8;oz!~l|-<$eQljC7#X!rt(nWsV@mclkIrvyR_cpV~rU!7X=VgcTo*!?25 zUsiWDeN5uHJn>`8?(+G$DF0H%jir!*It+{Dpcnne`n7wuwz+~a_WL0(Z6sgVlwQ4f zJRLygH`(LtuBfoxCpM)t&rCy-6jlEXZ;d`vJ+`U%spj}gY2TLCqPx%T&|y~ z1xWmN!;i~^M!SzHg2hpT3c3PW@h&gBW8*O)Wv|DA?9;XVWuNX7J*z2f_7`6+L zu(e+Tcw)oN%3E`dW@ct=@)v*ghwDDho(7&<7O)A&I2l`(A|sT9H9FCQMI*Cn;+a_xhvO}b7w&K z;{G-Q*FrZv!AoMKk8(UZ&&K@bySd+deDZ1&B{fYt;D-$@c3r8q>pYu)ZTl%bEAf!J zptOR%)hCDKgphEQ%~&P{Z?l(P!ep}6U+T)-Q_6P7%lz&ZK+gff#5@{>m%BJUTvvf- zx5`d}MgMmn&Uc_a6aRadEk9&2`5mTBiy8g$CLuIc3(V)}EWfjOf_(@2H!2&T9Y~$w zMh7of4=b%q3|5f)(%d}LYY9wxbnkq0QN62WW=Ds*n)6#7{x;P>!1sP&hU61k;9d9p 
zY^#yVZ#JNSu1wbPzi{CmG_u}*hScHN1nOXN=ac#Yc96>fHkQ@%gInJF2Lc`jb@|A_ zjbr)|fgpq5_D2C)UDENEH6@$0rV?i}?+Z|S^{@1&E=E|@z**-C_;X|=CF`CU^Y+c3yMTT3?e}4@o~=F$ z;ca|9o1B?7(FJf?FYrzG!-R8dxnM-Di-nQV^5kIuLBNq*l!7t9PTse6OifG{u&OL| z5jpG3?CuSB{m`Uy`ME1Vp_Xtoy*%DG-J;C04)V`FdFZt@Ur zZB*8JX8F4=7$-2wSPm+cZBI|QC-t2L#PyGl!P6hf1bFmM=+z{LaGTAx*CbF)1)VsD z%ebefrI}lkO|@rc)O)p*xw$3J$(ONVQbQJ+>CwFtbZFoeX3=K35 zdI{K-qKGu9O7FcB5=6xYsC21{pwfGX1Qn1P6r}}7=t>PWKtl3<@;v9f_jlJi_nh;G zYbnbGB$@Bbo;`c^M2x893aPQvM#RZjH?uAA1$nWEW;qTQ&{S++`xE#d;Oi*>Uk_aM zkEcAf?!QJdVWk^r>iudscN#YUgu+V7nExG@KI$shSr~&y(Z$vA6=J`bzL)m;qW}kp zHFjrc5ZayMe~1}|q?k2Q*TtCN>7pLStXF(R-c`Zg%pnqw$G35ZlVM5iDa+;~m7{_Y zop3t1Y<9~F4bn*|IM4X}!nRg0G5-~dg8hB=2HFUY@_cpG=M0BoZmvz2cQyp?8l15z zKmVRj*E@BnR(xX>l~YqQ2#+yX$Q4lsdSeRxqU@U=vhuBli4Lv4O9gQc+uHmPSi<@0xd7}}PJRn*>wjemYrBs|r;!&jm09SX+7Oklj|*vop^)v8l!!LFlh z= zG+gqQ=4^6_l#hcMt>E*lX7TvQgy~xNk`%E%$$l+7CVC)Xb+%!Jg;jZAD4;6BH-?a4 zWilAmyeGVeEk=OQ4S5L8Q`HQ70)E~qZyA8(^Vo=5BvjW!uJ;J{kHZ~;SkM{p=Or@g zZB=mx33|p;_&OEt#;Q0q80%nw&?IVPJ2>OT;MrA2{;-~4Jg*gA#fwa8ey%pVjpLv# z?CT_<+z> zQ9b}FbnL2g-pnIeXj3aloXya996FNZm%8_bG)o3~vi&-kUi5E;Hs} zbWTmCn&462ZQtN<+PWk8Q2jsB3zCBq3Ju}j7U-lWhSetZPxqQ@cA(yrb3_8Jf z)LZ0e$PhR3brcqRSI%{# zpCfB+Q~h8Af}nnn?h}!DK{fJRO$e9SYw;GZ8>Fc#8JTE$t?NE}4~IE4SQznQiu?g9 zyyO|y6@<@1Sm~S{2}aJ_Z7)bt+wllDVFEr>tVdo8L%9u^m1TvKg^C_)u?iwR;v3qZ zPM>pn(4d}@;Wl*T23aafS1fUDIo7UYyQ9>$7uuEv6`cAOUKZVuxe=^3Kh>i`iayx( zY@>`*YG7YgO!)2nnwl$9JVL{{a{4Q>$0?uBzcNIQIV!FE>Ts z5K-HmGUG)RuY{L9oeHl0Nx4s*sBS@tC#Jl5bp7BN^TVZdnT%pTMot@Bs@#{Hs~f3% zAId{DuZoc~&&rCwKg~`Rc=w9Ovvah~w>C7?!*ctNLKb1b?>lDx!~uh`vIbNJ#9IHN?8u{QJ&BJ)}sZx(Y_#XIiO>90S!)q3qa)FKe_2V+IdU=co@*d z=uhOD>er@bmh+Kt6w{kxjw&PK_P(?DDZTw^sQbH8Q9fz})xiK!@SrG6d&NNQeKgTE zE1rfUL)JyQWH3RfN#A|nfc2Ta0eTv?Ls)SyU$<|V2a1Gin}<5glrrNMWajUwi3hLM z0qsE@FRvInjuUozE9JojRchOcjqtXG!{M;-e$wt#i|hxR9+4;j<=WsTB^Ed41RZ^5 z8r=}~G_|FgFVSk3{n&Roa!I=!^n1cX+^jFjV-fC-nJSxU@^*R-f>F&?l58})_MIVa z_T`0rwZ0?D4uWge^wlH}dikzf{h7%%$DZ=Pps?5DwG4OrmpFhy(s9d{ommHZILE$3 z?@r~N5|oz-0^b#@ay1wUyKE|C7I!d{djZ)=O51oC%F1$!G|TrA(e``-xl9CIEH&M~ zEEZ$~09x>U;FNNW4a->D4s&KbY0pNx^!I-A{|Sz5IGB2FR>ZDxV2U*f5R;rNXhxEA z8n@ypFq*%=m#mOmvwr@W&a=|dJ#^H@_iHz<$wd$oYq2-Hx&k4(x5?qPx$}+BPm&B+ z(0e%E)rkNHA^@Gi;!@_CQ*nfkK+9(^^2DTAr@YFP6Kqj*h|3APy8Zk>%0GiT-e0`v zc1`XJ9p~f&=ix?8T+RYVS%8=z3`O}G=3qEW4Bp~zCWuL{1+tSfKI;BSvTbo26GpAS zifa~U+%EVqsPjE}@+vg#!nqb}c4F69Be3y5bGBS}jG759$e!iHqfWuoy;pFzT7puBcjspB⁡_|MKr^tQnh&iIrmXA z_640z!A{1aY1yIZ>18)0jIUwgPZu8^!+X=skx&!IYsOV&sYe2*fg0;GS&-9So|DRQ zpssXDEenT{BjRTG?`XfJf#pAMD1Ur)Ejz0)zg=^mzMXA`TrQ4{4UYGwqR`)Hg_5oDC{> zE1PLN)FEDOc=wEsLx-c9snD#H*_tmW;Vb4KtN6ZdH3250)=M9?H6G>BRE9dVy=@_)wmLg9 z>l?B5!uKVVtqX(4J6qUCK-mP3uzvSy0!GmT-O`Dj39Nit&9myY-V1!X&0RF61#E2i z`pLG~U4UkaWNjWT+K>^MEEq!cm->>UFy-pJyv;tennn@^R zyBv9KA0?~|)taUBSlcdt|jp^5!UeM(#CMHi%eq`^!$i0KdALyd#X{TO5$`>16 z%k}E&8t=N@<0|GP-?xKhP_b&IsSmcm?dY^ST{dzKijFDFo3n9kDMM)&)I?DZ`#*fu z^Ujb{j`f~ZIJCRnbmdi=oLr7)N7tcr@k$x1hQaHjT{n89*5ZE!E)0%8xct{B`ceB@ ze4ThIc6XCdR?B2JR{|M#yffZT4S7?I<*b*7)#$M)+f!*N^HJ* ztrWQI%NO~Z?-Y)HYso<|j1_vWQ$=*Xxk_OFmKoP&FS+p!7Q37K)3x`<)A^N~n;Ix% zq53RHZ~<}tOxueGN(6nCwE8m-7@b(!^ZM9Kf%Zsyp9ZlXz9Zs2hFNE1dZ2PSVm+F^ zLS-IBXZ_z0a6#49$efam_(pf+oya_I<1q#o?FZ=bJ zS+Zb@@O}JQ!z7XvHnkh+ZgOBt{1*dgSG}rlHO)FoIIMlRh2LCVJ7khbYL#7opuwkm zVJN);4a6F%{26O6eW%b23Ob#H5B?JFo#8X)Gxkym#-q$Vgzj(gzQ+Tl{_&;)-RX$$ z%eKKYMIN})l^WlSHD6%@E|_+cv3_&umA{t-|N7^Zdsk3CCr@Tfwys$WVerl#7CBE* zBv%CWW<+L$#DT42Lc9N-`##so93|PpG>_JG$_Q-3E#2J}Bo=fkK0g*=Pt{$2rlWlU z?2Yp~7y7+kbaGlGS43R!ax^*9bNI-KyzIk_hE1^nfP%3NAhK6#Mqsj+1o)qY`D>)A|4f0xRfuLOE2o{|A>37TV1 
zy8i)&(ni&J)W!1$q^RMy1(T|)7j{g7P~)iY4a5xL@JwR+2iXXbcZ;*K<=08V@)rwZ zp5eNANb~Ra&{L1|jZtoo&CXtPFw7~qdN1ZaU(Wds>WE45*K5wPzL)>rSkjom0nz+{~W@#I;LA}83shAs>(-nqg;cl{}lqtbc9V7A6;$epCj>f+1ltXomB&Y+A zV=D9f3kRGyFP~}H zeXZT$gh9mchDE2% zmjHS*;I0~dfx}BtGH`g!AZemE+9 zU+|NzB`!$LlX?`rN7^6Y`FoDAwkMjB_fYXF7mJY8A-sjK$^gIEg{6iJymp48$X@iU z?-MA3<5KWsw2}o?1+Sbr#QJP2%{uL>ptN9m}ZGa7$<4O=5ctD9}ahj_4us`$L%TRnn4fzI0$^V(|?lxttLV1 zJ#%vK=cGD2wN0iEjK)Ji(FaSw<r~^vIk8QvWG1+$4p|PJ2+c*69aXYDY>*<*cFFAo0#T(xLU@K2V z;ig%Tq8=SX@$bde-*1dKv)(arnf8-tPM*`d%q@vV$W zB>l_NzJ|y5<>j{xA5i474Z|H2{mQcM^oXdeW{dPdSRA51U{K&%d=l?iCl2|O1!;24 z=Cr?q3z{+-IGXI!B_6icn3;c=wv9&zd{%}D4vv9Eg4!mz-wzI`1U{RX>MXU#0#(NR zC7Dd545!GuD`te`SgWb z#@Ycq`=Q#G7Xn->>k60NWPF;#=DIl4eQ?dot*q}j+qoaUze4-O9$sx8FX#S}sLO}H z91!8TaQHykftiL3^4IxkhlrOWsiDZL+*qPU=UN!hZG?u;rlnCRq4b;wv`JN-W27D# zEid;6TSFhvo+y$b8GGp^>k6!=k@Lag{jN%O2%SKr2W&{<(y*e3e^(nZW_`0%lP#s5 zK9>n!dH%~$!6tOq(H?@OV3vz&^JRbsAdM*s&Tic(i}n zBEBv!?tgvslwKM*ucC1MQdl30U-1njR^_nM88!?zX7{|WVA+?Z``=`{Pw(=TKh(Rf zctGluK-gw^W+5Bmwd$V}W6mFL4zDzxVrdi3eD3&EdaOvqp+|kKYYs8Vfg?%=tvVih z!eSu8eknqX2mVg$U;KkZ9L_=p7w5QNG=~iGVX$auxMtA$3Nk)QC_Kxpu07Y@l%nO| zrzBKmn`#oF#Qj3#8LxqjD7)0no8r^I;Khl-mi~<`oC8F}N#z@j}U)C7M6ykd%!^j-_``vSiL5_7+RakZ?jJ9KX3|A~| zTZ$wJ@{Q&-!`c`t!i-Xud5D`=)u}zWiamQ~O@^=C&sy1kRnUOoG;j~IVeZ*jX{=Q3 zzIHJ`Ys_ToJKmfNw~h&BW09EbyTk%*JfoX@>)vVxm0$%AC{eP6u98C7;21YH&euXt zg)VB=`2{xv8FT~=kqzrW{rKu1xpMT#OWGUPqEDEb&{gc#bF3J1!^zbLj;!50cZH4n zJak+*VDKLA`at51jRPE7bL+6U$j)uS3Xia1D0LZFjPU+Qs)2?$Qk0rjqIa%5F|i8H z@qpzbD%KL3y2tpwzlK@N(sqdqRG}Tw@WW|} zEo?7^$R5Z#RpX;LPhsi}IfGHFv>%2Ok#_rK?|Up-g58A5GOR5unUPQ&mh)iqelOwF zi%uWvn)tQM==+yhphV>>&08@CAxPm~OAF-yO99nfQC+ge;;y^;frD=OXx^R1@1JGc z0wLj=*WmQS8rv`aeGp%Yc>7)pLVr882$eJDe#JtjAaTg)W@=|6MQ=%{p(#HUr-1#z_0qlF*Eu>JKkP;NV(=75Jq%&2`H7{r;Pb)q`l!uV zz0kMnNUW=E>S$-#D?*^>M^ic^Y?{jbS^>I+czRO)g;HC+RfG*?aalhc3q!d|y2sVxQkTb_B=xa}!se<{!zH3iup|UD`1xfsa9CB4O!_1wZ=B!&rQV24vMf3=yrxL-A_+;(V zF3L*^?))5hHa3Hr@Q4ns`-L_qU!4kg&ZW4?~G1sy3aA>IqNcLXv>-OK_`dI znj^7cUDc7o?X4!Nce+Wl9H>h4g3+>aQVpNR&jFrdRalui|k3XlS^s)b%VS z93F9gf2doQ5^DQJtDg^6i?MC#4S!S8PB?z*Y45Gua13$fFa}$Et)uM!N}&Htt$YjY zdq~0yLPD6ywGvqfngOFxkY1j()C{${IkWl%jR2zS_LoiR$Jmvu-J9G5gw5WT_hKb? 
z;famVqYYL%^D@MS=3URRTJyf$v%xhDS@|Q#?0n5Y;+NR?HmfvqtD(6@`|o5~UJ_!3 z`+w!nqsslHQ)DiPqXl9C<9%mgkFfpGH1-OY60NgZ1K_fX@h<0mswZO+Yb5rZ(Q-Za z?^nX7&c4pN)%*6Q^-mE-W^O!L5D zQj%6yEEwYlU51C_Dx2<9Bb>_-CDMP!EBf>$5rSS?k2Wm1dv#wwe402McM4kM`7~3r zQr!4d7&*aSVm5z*2%$6U@kTU+23zpR+G5a&3@RQH&Z8k>@d$N&e>YLn2sNuSAUx5m<)Ke8}N+l0&sw$JxKz^MXoMH&@Mia^ZC zp8Wo`w4XLn$k96Ay>ARMzQ{KA_3N3}!d>nzo)^5NoEZ=Ho!yHger^BHrCxQ#~` z3=AQX07TgFe=HR~iI+zui2JJbx^DfJ^ZD7Yd^hLa4zbtYsrkyWQ}s?O?^e5hirZ&D zlJ884K{llM^lmL_lN>7N5)tl_slOc=L%t3PA2KqAHIg$-gnQhp7n936d+GzY4@3tnDcYP#Rd~4k`SJ54g_ydXLuH0SIPw7;&|F z{@kq1ErlTA-;Ih8q-89iqTfhCdS=bvKnskb5M%-Q-4_fn;Vgaww0k}M!i=r9Umw6& zw8+yA5*PW|sH{46>wDt-m3DrSfF@`-WzSX%+o)paS<@*cRWh6KlQV4_HC(hnL2|w6)>0tWoH*9oD)JP^N0V+o`45Qr&#e5f z&~I}AzzWR^qfphV&2x$nG!jNLtYfDI62EIG6Y^i6iA|%ho>!7WZj|srO|6UK4woYW zbt71{*Y|7*2h*qJ@?l$s5oiX4wru)7=dIW|YZx>s@|>NY9dE&0Nguci7#Kpnry zN`}2lM`#ld96ZJMv7qP;{S6B(%f45w5jUK*x{O*5t$ba3gM6NR7!v1t?ia;*?+--} z()nmNSkIDcnXniG^tK--`&d7YR*r≥0d{jpDcTV-+mceQRLzuy`0&A2Fuo+RmM| znCemYDJHlhUcB;teJN&gsCzjv@@vFziE$;h${f#-N_Lu?#X*d9S7}7yLZ2q_Isc?E zGCb_jua@WDPS!2K*ZE!}EXsSSP1L3Qf%9R`Gmdv%Vc+y6*&q@#&!fRIcuCKJW0>D4 zy~U%oG8hKfX)exphlG*U_F7ZJzMWkZ4xW0Ykny5ZfJIQ4lSBz%VPoG(27sk@{fCYu z$FgVLvy0*T2l8h7tQu}4j@z~!WGpmN#uT7mRlUH%xnHlge=0ZX_eFm+8>in!6`Om+ zy7NRKU(A`OEVyvRAML3!rykIzt?BlonTivgr6MU8%AOPV0liX>gC}AOOa!ne*p5Tc z*83Ce|7|Y@g7%y#?_Qv7)KJ5;O&^cW(~)<~WkF+&N!&OjCiZo5RTD3{eyW-NpooRB zPw0Ih@0KJEYi|;PelBZ&5NkfF{`yYE3u(SDUVGGp31Rn<;DxdCc`nq{rQ8e%77G}hwY#MPWU23+ST^VsbgDgx; zeC4>i3?w)KAF(Sz&R;DKQMMs80XCGeei~!V>npP2UOj3|U`>M2D))UoA}nH?rASU5 z?T5aJXoJ{`tcfOB+@^YUXFpRX^P6(An+%Q6F#+Y?*n<#;^M`cfw}BAVUCb!P_h?B< zkArvN&B;AaA`1Y1@FdsxDw7F(dWn5|)O#41Fj8P-iKqp+kbPJBZG_@^c#!$Bs`hjM zPn|cpvRWd|@5goCDwA!Lo&?#I!W<#xZ%V9z?0f*@dXOI9ORCoDBD+6V*nKeW*|^cK zs>ybYw%E-nu^dT=Sa^LfI+IJfNSk_}#Y-j}x4UzA-_CDWmw|7M&~P>}NVw-6n}efT zTcn_NQNK9McnG_br|x|%HJZpD(pn}XtaAU?;t;yRfQ8{kf?%_QZQ}WHWm(0|wPWd` ztRBe}7rovmU6vRyz5jsA#3dM8}E;0ilEQKUhIvimlN6XwxlAM$P$iKwI>r zMcNG(o7kyidqRAHI!BcCiNKgE)o_gboU^=;n{>hX#2-;9`%5xG%c1 zhJRA!@PR3^jNajAb@^lw27CrgNUngjw4(O>xWMe;Nz!7FJop>MUOp0s2b?QazEPfu zMk9U{vEim1h~2rMUZ3N8So~QjXn6B=Gi1QVF`*3)h>f~2)mtU(>sj{?n%*trbK-j< z^OU1&7uXycYY9IoWWhe<^dG8pRWk*`7jF;Z;Qc)1EsCAyn7B4QO)d&JR6P}EacYo zWYZ)Cv+WfLc*+kHzlD<@HYx|K%(0OhEiY<^JOpt;vXBdu0K^;646&0Kso3q*qqC3z z*;D3KE z3R(SMj6bof0nn>SCI5xFto!;gsN$z^D7&X98t^I#BP!oW)(>uw|e@QR>KXusVoH!2#eJe&GD*0EO_c8+cqls{rhNdbM0( zBa4msFOqgo+-beMEb^Nw_S~6$JTgIi>ztd$C~J5i1g*0N_9|a695^iuZ1f;$Y-1+; z7LUBOs7!dc2z>-AMXtVzy^|$aP&h%EYL9cZz-+Z(0`zc!Sy+j+o_h`?je1;b6+)gE zJe$6u2WjP)3W$M)mnWdIToT$V^F6)flr0l{$c;gFc1Oaq>=WiiSs?k^{dtW#Phh6` zAzTHlru8xRiOinuAm81M5LG^N*p~KpHyDY)Y8nn?dIJ*kq~)g5y)}xbNZ+GkB;J?T zidy#O{h1DlL0io73)l$kU8;B0v@I>%Jr?Dcv|TOMcK-0`+^KzkZ|grnpX-6lO>lU5<{#Z#_O+WU!8J%FuK~T0cFRK&u>; zSSNyY&0mhMl^4$F4>`GDB`01sP3XS^T@h+4fz^%WH*9)wZ6y}n;PMfrsKym`BLp(j z_c+?`b+5?r2N`|`PUZ~=V3nai%M)f^L+`90NCKW~XM|T$ZDSn{*bGXy{yx3kTqD}~ z#CheWQae9wN%?IKeLLB5%u3|xMfE;?+S9X&1s=~aV*JAo z6T)25VpP~f{(-UnE#2wB?13DXxq3@0qm-aqFfw;9Pqf*^TObCZTWW0xlJJA?na8h2 zR2MNr5JK~`tmO>lzIBpkfBeV05~du3@DerP#UrUcl=X!(^nIx&;8r>Hpk&FHJ9YC# z`9^)sd0NXrAzx`65-=v`cvlUM>B%yOD3~63-*CE)B{+g);|7sWCN|l)<7BTL5#%Jp zAGsdv;#fk6!5%iZW=$)?o~B0)!p^huMv^OYWVC8ggatuXV-GErTZ1Yb7!C+RynNrs z0=+7g*`ds6kpSNIAtxk8S)ds&VlRu(M_ie|${RGlunT$<*)VJ0T3Z38~0BRrU_+v zC=SNpwdXXWt)*VfzrI@e<*@CK$MkLypEsqia*GVMVlubVHNuLJ^2fRs`t_gl6Q0B# z4%KvidsB5oVdE#iO|f3U{6XB=!wWL{+pjz(Mplj+4~i$Y2!9+K+_GBT>;3|J>}rK$ zv6Y@tQZ;`XQRZ5y|(KiWy zh+kQ4@2R2xSPN{*s`VV(cfTW95LUIJMEoa5_ivbAVW-l_24zB z3wIZXgFDll7Y{)UA|Ex^ideo}eWOq3+}pr013ybJ$?<~7+!|0N%k>WIIm~(@vZVRj 
z7qry#Y{ild(TjI4<0%IY+8xa)ghT&^6GXqdl4em_K&G>)E$vKTMrBtJprk>kT}mQ% zaq7JH+ko^S$+nih*|fYX1;Cn|0FLJH$DmowR_Qayc8IRNK3y z?UW*=?G82H$dlm1g~Xy>zkQefM^dbeC$}YyJG7sS8FWMIlFR|@lkn%ga1+;C(Y2y_ zb5-1*`}*)X2(imvhrjNRI#|Aq7IAjgyjNM`Cs@`Y8ke5;z&XYii}-ClnZfRKc;diN z6XW<6LHTs$AiNj!1NK|KZLle4A_L^2H7~(|58t#t1>{EGYn@N;wT;lTXw8eBaaZLC z2c62in8>19JF4R681n6u5VOli(9*{kpVrR^ClFv0ls?}q(j62+|4dIOWb(WBh0B7z z=rq$3U}shd9{h@<(~lCY^I7Zb**Bpl(%yj}`YlUcZ5L7XN8+o_q{Ou_37Lcb(BJz$ zy)lLZnOSIQyY?z+)!qo7_KLOC={}mh5dm)>6Iv53OYoD-PcEkq+D1mWU8GZYPrSy? znsc{E@nO7WRY1UGm<qi0GTYGF~3)V0lQ zV$BRc@(44=rq|eIMd`tY*(M;`1s>BwhHgCpj|i&WxV_o*qcwxU5gF_4wypOi`%kf1 zd-t{{PMA4XAI(+{zi&bS?-G)WNOgqHR9@Sm`hA%ce|df)83>P`{Oqj?O+jDn{wp%x zYqPU;uPE09wq{Y$>~Xx7|J_gi?&P@$n~~t^hd0XP-<1CA+5b4efF-T@r znV!BSR=Jk`FnHf4DDbbZ+{KWmVtY7UC0Hdb-h(toMCZGVW4j;aHcxB4-`}qC`(%GL|=m-lVhPmrfjWa7DkJ1PCXpLlt-Zg^SVN$|p z%VI5oIjOsQOqMk|>avdQZwefhvMF;Rf72&Y^t`cEY22SX$fk#9$xQW-;r)8Sq_Ga` zB`HR^E|kKMOX&y?{D_bus*0DSGtdGkeKw>Zu=L-4cA?sTMMC@Pz+{>6Hl*1T3LL%r zP||b}sJiGG_y+4V@Bj`9rsBA{8Ak^EE#5c|SR`TH+wpqJ#k zpiGpTJWc6L8GC-OfU=2~{oq$WJugq?_$ppqVYzxlx~pH}1SWCtQM0$-4U^7HGPw?9 ziIM`-cESXYvv#%~vd$)RgkxcFMC`0a*kv6~=Ax&qR9l}896lr5ag;*3FJFIYqa-y!3Hh;$Bqw>9ZYokY@U2mDSp$rh-%k-o-aCiyO;vrgDv+@R zP&AFVCjkP9STUg+m0|G5AH|Bt{B55b!}bg*PHalJY3bE-agj=Gb3iy%2lGf?OJ%|u z@Zl!4Gzvas4@Pl^f4{tF)y1Gc{*+&Yx0lOPG|4ZP-yGh%4g%2*z^;S*_y{;wKP#a| z<}SA3TYpZysH3vEWtW4KT&Ciig5WCGmq_i_3f&){^jcok3W~8y-!068E5`9If9-o~ zN@a7Rb&uMUjg2JXfLZ75eLs=Rxmu)+u)- zJTX$IHe>K{A-t1$qu)2PEQ(e1V1$2z%De#tvP&11q<#Up;_;^F6e58)#0A1IRhzo| z)z!f#rtGF&gV%YFwMC4kHjc=eXWL(3w*}u_3CD&B($<-BuQ>CXIj(M^@Rnl5zTqsJ zr3S;s$^s#t+`y(+94LbvwMbrSZU(G#*wHtjt2z3 zy-BUJZLY$U@-r?k;cK>_buJ6Os#XlDe9pSYLXE2476j#dd)OFW+)k=O(tb!`ZJ~JK z<_wW1=AV||AHLPf#YXxqYvL4IHDq^fp#(LxKS&GRICzG5igG-O-f;g$CUMBE4UthD zK$aQ+cvE|o&a@k^ITY{BOvQHgtTXbpqg@-!vv9@R_dMBtD!k!h@fIjKpf=u`d3Be8 zH4_`U6W;RT1{+ua!3d$kc<_)rSc>J}NXR<{qg~_qMYW@T^s;xq?{D+#A@KdN8U1xt z*EU#F)8w(n3RC(WP{{yhq@Aaa_e{zV9ixnA4vx$p#^yy+^^@n!0dspeq*as?wybb< z_ttAb6jsKEIsr%P1mNy^{5HUwmz*tQ(PxGGTV+AZvTRF>f#%CWb60mtHm9QRJXJ_g zP`-shkD6G=$!5Kcd_ZWU4MH1Lm&zC48xKbzd47wI$|9|O?3feL`j=8RDqO8*y{R^NJSEnrXAP+(MsWvmaMRuwdu~&8e`*fO zY%IRwN)s1)GqW~v1N{iTefNsTGeyT5w~HFvz)Sm;#tq*a!pRi?7f)%a#OUqQ;mlM6 z`ki^RG(ug@$T2?Aw&|4UYGgj?TuB^sIy|*vQ)#1lDpfc7=Ar;W4Ll9*o(&*rhPBpq zo6{@8Zut>mrA9SDoqMpxUZ&)*tQ*;R?0#A=YN}ImXT_Ecjm>|0CnJmPjYPdxMefXN zgzgei&eT*mnjX@P6~*}r{X3bApkPP7I66%*(q$#(#w=^_!b1P?Fw(FYoZ^F%n5Bgi z5;g4eW4{H_hjOOctI}CU$ zrvyB)6%7%0ZDg!f?|iep{Lo5LqGO48ixK@!?|3E!Kg=y|t(hZGGDDi$42_*o{k9oY zy+)gw>GZtyc)m3si)){-l1;Mok~uA9#uj*Q4std6gyTequB#NDL>NVK*U8TBFTrFk`Qx z05UqJ?RwZ$q=d%2F-1ed=}uTPo@Qxx-OY#^ii2H|y(u&-fk>oGzwnNU;ds+Rr#wuZ zieN?75*~X5IW88_cv}i~M3^Ay`I22iq6wEnZ+g?uixFq>6epB*vZm3S6lQZfaTmms z+9QEAD}0}~^{p8QflNr|N1&yk$Zn-q%-oPytfa6YhsmZ8?bnKgm5h@VXQdl_O?_c6 zQ*a6a_d9_l5=R7@ow%O-`CcMhvr|wR0u7*x4caS_C;CyIvUgJSONfxbrVqCIRf-(EVuDKpBwgCpIU$WO`Vb+A-V&+Kcm)*+hewbJ;}mo;5k}W( zJoP2J9FAX?Txc&<4m1Cgau}5=htp26=+Wu}pK1jDcAyd3&(y6?>1+-NxUzgg8Tma* zMXM}JZZ5$i7G0)saM8Iug;~!2&G@2#hIym@D16NPpe98d2BOx60{A43SFf-QFBK=27J`7Z$tIP@& zP3O%f)}Q`r>4g4|Wbm>0_wT>~JEP>}<{wdX1NX{;)RR&vAi|Ezq~M)6?7Rw~w~PXj z&?Ggp{-6{ZhNs*-*2u%fWi)zvGS51VIZnkRCSN^aRGNVXeBHUp7H*(J^muf0^9X-R z9n-OL<$(A3m{kMba(njy)$2100^-Z1_FcbxAaMXhWjN)o+IyGd{hxfafKP&%m7i4@ z#2aC*Nh9Xk8u?lB$@G;HE{lSKOpffrPk{pV{D;EH`zzHP!Sqd|#^A&9xV+Rx*aO7Q z3ic+-5WB1d+~_$n@z<{xJfC-iFISyhEN*pB4{UC;H6Zw+;F&Zh#c%uxy{MwK z0T2k@0AlXYn3PkTth~N-gf!(5&2sO*_w3m-Q`G@SIr;tj!EqBfpkaECD9Z2O2fisr z9tIyPSN{l-Z7)>-{3MH`8)OQ_N*V-Jc;@~rV}`###bD%D4WL0UmB8ktM7eps)|Ps- z@=2uj&ffv`E<|K4k_=orthIA1_-6gw0er;XT?)2ch6T5q;KqL|yPcXFK+0|Qx2re2 
z_w?RheA&hc>auy$>b0{GLHF2w%jW5=_2iSCYX-2{8AW5;IuY}^4&b+qU;@_Y3(4=R z;Rk)b2h>yG=z?t0@|%`E$EC@Q-wMFTqN87fE2vq3vtF`P48B`9Yi9)j(TZ-cb^ijp zy&I?<8~`OLia{mwIz!u%Qh<3_;K+?d63<_T!|}xs=K$u^(3r{GcKIwpI6*%kvmp}o9KEWf9$f5?Lr0&Q0<+O6xJ=Istb=8QBoJj+=~E@jz}3zJbLQMZVK={GfeAWDz4Nz zjRwTzBo`e6<9F06J_yLUnp*PhzK>~lS4G#ZYjUuW^_r@Jm};4%NuH8u5k4WCPGof3 z%vVAW2{<9M1J64mr@57|5gvHqI@iAYkuuZ+G%@3>Wsu}cfg(CACt56~c+c(gG>gWO zc)imEbicR>*3hiav0W_IH@d$BbMw22xkZxxZic!hFuCxNuO|9S(%AsQovQ%4H+0x; zJ+uIt&ks_~lB?Mz)v|w+r?M_NP*=LJIZYqvYS8+XQDSc)+-ojw`QuxxT4{WIyqA{a zX6_Xx_9CBh#=iy@lC=Y0dd_Y#O1+FzCgRIgi#ovGq+WXZOTP zY*Ys%P1D@dssmWn0q^6we1+Nma}sB+5Ixn%WpiQzjtXhNEW&h1HH(q=T&Sp1S%Ty3 zh;r#iN8Ki7V$hT8=T5~+9#_E-$Z<>0s8QuW`e1({s3w6!j?cbU$FK2D-yx-kkBjtf zdAeN}7VcKRbm`KGWJ}$`?>@S%<$#7f&a=kJ)f*-1j$|h~!zfm!FbboPMgmHWPGaL% z#mz!BfoNE5>VTH3Gl(m`q` z;ku>RsfjD@A&anr0?AxItbP8aKwr7N=QzL^eMu+fj06uf?Col@!N?lsi@7-yBk{>r zjoDnidI>vHFjfEy3}m*MqNFJ!`F}8!|Df^Sv2W@gt(W(TL9-sGIxdfnfd51cF0OJ) zUKe$T2TF0-Eo@z52JbP&PBfI!)h~kZpV{*S5g#Utt#kabmbWQ3Jo(CI#JY^NQ~Ip2$OR7KihJFQ)BHt-wC3aFA^5XiDVVZPbju zuR-Gyq0e(f3@*Tsg^{?*o2@b}!WgEMPki{It%Nc6q%xw`NJkZ%TA(`Ls6p8Qr zwei8_FNf2FSa-pa)J4nUwBon@9aix^`5qdC);^_4K}rhxj69r}A%6r)=1Bf@Z?Vr0Q0Q zVNK{@gF9!`RjMM?BBi#G^WEx=GJ9nP6w9Md)xbRqisjwlfu@|IqfWYj2`lZ}ptH(g*yAtt&9 zvhrIARqnbCx=!568@-AZeY1$UrlIQ7V?RfqdF7H_ca}r~?F>pvNfQK9Hq!w3Asz!y zUs`dQ`eFhOJ)=+H&XBvriO#GJPZlh!&uy`o&V@&WJmSv9Rqr8kLTh(5C6! zR}m6-^Qp(vSdnc^O`o~F^y-gx03W4Sl#7(<)JunfYjd~S%1`E3y)zotde8k2nTNeT zxPETnpl1&0MeB7lPj$nPQUBSjVuVVcO`*ylfta}i(MluKxz0L2GMmp(Gc@c&2U_4c z?Vg+xJR}@}h$J>pk|>v+*&+sBe!+d6V$YLvv}PE8wb9aNvpjg70lXAFs?QyaMSo2p^0TwV?&sk-WLmUVnXi z;MK2^x63hemHO>hE{m)DV?<;ary9?F!Hc8)P)^K@Bi|D?hi&>k{1pB9HIF*t5nVmT z-9ZYG63=a?Tsq)YtV){fqoD_W%iNdd>#1iGOI)2%{P7adXR??W+(%R5G(M3UrZo@Fj2){34MY+1gT4lgnV`w(fw0HVOf-<2%k7x#@7fdL@Zv=Smka8|6LMk%g*Eyu8dCqE zerD!OHWW=tB2Tw-JIJCkVr%sAWO(&4cf;fGbTyV3$4s5i_8Su=_W6#Q>9uU{LIvEj z>$W2B$~hFAF4?i&4A_aPaA(ebH;=d4sv00DmmU#lw;);r7^S5X(9>ZGG68%iiQBJO z`lsb!v89~__LK~K0F%b8x>aN748c3sAo7PpS|T7v+JV~->nWZui&UBz@sBIV)up7c zV2=dL4U9=7ogUjRBAi~M)ou4u;ruoJMqoM&wCv zde?@U*Iv9Bw3sfbFTC=Jd3&4#4Qu88kyod6bZ1cGP0Y+4!|6GNUy!*df+W7Q8#MC{?98{V)NkDO7IFW@ z*_qIiU!`^UDipTISDBGT|hO3uhZ zE2TG;jhjjzFa0JxbTWm&?41io0GCGiuzMTg+;HIfv-9O9rzft_Xo-cOg_H)S=Pqzm z_*gv z6XB3WX>c~y`!1EGgec5BCwV<{%OnQwF7oe#5GG%$vgrMD!Rw>uFw_H`G(#K(JSQiBU0(9|_*6$-k3weyo`zw2?DKZ`RUbSu5 zc9l9?8i5gk7KYW4+`tf|ILfYqmKtS|S_Y8Hf~qpwr`O2IJEd7R)JNIHjJ?V;;Z24x zA5W?v#4{Og>ExrRi1Fe=y{BZ7keC!d8P9ldEo7T3-0I1mfbaYJmLW@oU#3M}Y0h|~ zQ-=5a1uaUoCH%xnHJNydq1B+hZEb93db$x`p4x%w-g25$6F>4eUIp>;MGazZuriQ0 zf50GOY%kp1)YDYQANDhw`)v^n+c%4MyAgcGcW4h~ilxM2PR*aN)epM6jZhUF)a#rg zj448{J4;%DlxWoWHYVA_=h;Z}d6LZ&j@3SQAF?^)veoA6i!fiWe0uhU|0`(Z%*nc`rfmqp<3d^;0>yr_C$*+V zOOk5qUVU0{qC}nLZ{y{sDDIs_M2jOmYoiMFJgQv^AYVx0!nuzj_LM<;s4J>E9)moS zBEQ!9z`zOTeR9?tuZBX5EVE6{{hp*D`i+6+Q2GR3@NB4)P`8aDh}&jRzSGmUZ~J_a zU(0yL89c|^8{!oXGwh1Hb&vY2&wzwLvm;021IvzCYw+M&}Y4X`yTffZ;7&X6{ zT&IQjXXRNfsY<^{{qa3B{GB~~_R7<2<}(~kw8eO!rFy%m)fPt1)0AP9b{98AJus?o zRu?i!X~G&*T^VumPsz!sqD8}+Z4yh=ix7R=g@Y-B2D^-1g&GWR6pvkEn`y`;wVv2p z?Pj33(q#`u8n47DsWQ6?9S~EmpSeN8t95YMCFSaP0?~as4oDw)H8NAcsg^9XyNT`#NfSoh;9SigXebwAm<{GXC zXSEEv@EJd81F^u|+yZ1|x6rXj#wfk2z_%1Wy!XlTw7>B0@{^q6Z$zKKn6~O${zros z_f{A$KCa!h8hIYJ;{8qPEdl#|F-gskk&Fp)VBVgh62jj(@x5f84w2hY&v+>0J2f@E z*d`%fboqr5Uicuf4hQQy-arYTqx)REz~mur#2Fc~Hh_0)p~w32?tc#Yt#rWT z6YGCBkxjnj3do<1I?yLW$-h(Yk|&t_0(DR%o4o8AV7`@zOZWbLm27fC7MT1Lk@1yG zQeC9U{vmwC3E7ooz{ys=-5t{*eTPJ3(IqFFtZV}q3UkzS<>!$9t8aka!(`w4;62JW zX#gbAbw|C=Ssy5U47I$jI8g>->Op{#c&*i+L;kX@fc(yWm9Xy@4#S|p;~cV)5lY4p 
zAB)TfNcbgo5qbIpKEvz-nqU7`(2{)r@O08o!Wb!8a^v4%Hlnue1wFAg!k_4>kmjsI1GsL2^I$8 z?*g(@4HD9$5=&`m<#*E3^vaHQW|lTUB&4^W6O%DNs%{ba-+S44eq{>A|0*CQjQl;A zR&q=2<0~Ti1ag$GFcJrJXka)ND^4TIw`FZ4EjChi_HexGKz5XYM$Jki4>CbCwe!i$Z?BKcmf6 zPrgRpVcJqqL|n4udIVr^Gs<`{h_f9w3I6blNM(umvBo^;zRe#0>VVYj^F+ht{RaGE z=}R1E=7>=+=lk$(3EU|Qy|I>>o&a7_rV*Shv!{MP{l2Jvzl)m~8axV5Hxq}7C_@B% z^vC$0WBUkI3&Ju!S(2Jw1}lM09a7jezH~>34eS1p*SFOEK9t3<-2aR0`|HjEK^E-1Nujum@6uFb||!#GBx_+c83)|A0%gHim0|pc4vJ~C1EUy z;yO4>J^Y+-837gpHgs=2<@J+l%pS^0B<7q;j{^K2T6_!>kzd0ImgmDl@%6`MkP`04 zAZkZ%mBe~28Ozb1zNxZYtC_(?$XsB`2ixQz<`!beT4T?TN{pgGB6f2vW?JNC-e>5p z7d9E(m(me>5^_PI)9r&aJ2wA&fFd1xR%sFqNqEQO!@H3L&@W+Rc6wwFNlYtAs!dd4 zLu4C&RC{FFc1$#X!^gnmg82QcQKXP|Ag zGuh+KBe}IpkKtgn<8VJ(3~;~-Ja|mrI`Dp6R#slNs&%2OYTOI{juib3@b|F zDQgs~fZAe-RiS6$qmL14=^ytCeLt8KUl!&SfmEzjyh@{01XKysM%9*beN}v32bHgU z>zH7f7W{T7Nj%Lzfjgo5tw95<>Lf3xGpHn2TB{0gp8f=oo|6_)^GZLn+%=*}i%ySL zHcua+$lQkM#Glk{)xl#Z zH_3-yYFdpM9<;|=^z+K|%p1)ZT`cy`u*_Oyszi*(Ru%K~)wkDoT*P5dbuT=d_}=>) z@?s}Q2!uJOz9mzb%?p!WoV;!MU3$IX9K z;#TW|V8)k@kdB>tn2wp-(47^~F44Ajbo*=V7Z?9iD>wiS1(*Ru)8D7hK|bmj%#lFK zAQd}9yJ)UnJA1H+osHQFuwC8#ujNg82McF%SAKhW`}wt*u5aL1npB#}nU#uJifkE* z!amNf_9L$1ziyHvLgdm0k(0j)^*W(BoegifO*ySKsyCfA@+=U|`#Qp1pe`GY(0Yl} zvGZlGopW;2^lDWYCX7q0Oq40BPi)Cc$cy7x7#`(*?Lp?{cFA)2`s(9_=;73{!Wrm* z<`jIc^UH0T4oQA17b=p}|871}aNzjY?sChr*FoK37RcF7NdlrnH9OCK)CWM^4)(f90S4-dwXva>aDaeN9Di(cqF00|{<0#H2g$o|&*_HJk}Krnpo6gTQwZFx{9b#A zt5w@r?SuM}2dZ7y6kVAg`~Bfu@AoLZbZWpZEOL&0*4r)OI;rtP&(FZ0cV$j>06P;~ zw#75o>u=}!8^A(Cq<^A zbYr5GMaZ_X09?9A3HR05^RLq~5xv{YGO9m}>@__vF1fJV=EaEIzu&UJEmnEDOq!x6 zqR9lLhcAb7IbLwIlvR~2JV#NiscWdAxt}_M&c-)BXH$BSJ?ot2+<0(O$A@m-I}$z` zogpUU-)tmzKRjYulG_lOp)u!d(iivWxg7XhQd+_Rr7nNzHtR-v zKYUlWs)w&P45g{4TeNQ+pB5{)K2bPN->~&qYIII_x}MGSaBJL`S!{t4FHLyG_}Ojx zUfpUCIMPN+=w4!77hY6#u2w%!Q?l|ExNRwdem(z~?PjnsaZo^T?QzR>*MGHoEV;tB z4X&2T?6`WG{x!a9cjNc5{8Ndb64b-39JiFM?*n8e>XhXtKXa;6-ov_WE(&8(-z^S}#Lomv~!!h@F_` zf2bfyk!2*HwZpZY6+fZ-yP_ifyeUfc{E$<$@Ba?v{&T_l-a7}R9746H=Ew)q(2hhT za;3-`7Ny8HgtSPI+QhQ0OE*XL?8l%J9iuxUN#EwPLrtWM0{P*?rKwvbJX4kX{Cjl4 zUl<3QSlN>Z^coJ-d}pShfW(R@V z*2%)onGOZai#UL7|5nQh35k^HVMBiR;n@MgKGgD~rn9Dkys(KKh||c_&KSt)4zho+ zgCy!Mj3|PD&PMd^ARAjJVRtcxKUWAN$`6OR80i08;%qI(psAotFKy=tqz76aMg8<{#A&-^3U!oSp53xwzci+&JC%IPDzGxp;(xgt)kQxp;Xw5La+G zdDuD|xpUY$G5%%bpLSjYolG1p?VT;{Z0R5D8X4QUIEyhbJlyDCpTC|H=x+J%JJ~w@ ztD7JRYf0;3M*T>18uZkTY?a6hPa0~FF(KNpX>jhGylHhf7I0c zkD7unx&O23f1LWitExEx9i{C+h0(Q`sj|l zvk>N`22BOrghhMqx6n)StGwPfjKrInQ3S^i3A!x)@P-b#4d3(d?I$*@d-25!{OBeY zQT&CHPp;v~^~Hi{yX73^o8Ng$_ft*(C`r9@MF{$@44<>RU7xdA>oRjO>v=u){5L2> zvHob!=#jDCO8%EcNjkLN7mphVv^|Udo&m&tm(iYlD#eldFYo;C2i^qxqr_dy=L`JT zPya6)k6ur|{5!Azmw^*4$$)+z<_W65)r5a+pfkWJ|85`nqg}D*Nd6X&-@^Ob{fKBm z7rejUemlZxyx@_HkK~Uld!cPfFf=!(*A0e%Y41VUY5A!&u8hP9@QScimALkx0&Z& ztuk^;=M^+v#Db7DKd7XUO?7Wzk7m`*c!zkYmz}0 zZ{%8P0rjf2TC~_g?XRLf_W_=17tPSaefF-8(PEh;i%bZ7*3P(G6kkQXn*CfcV{_Y( zy-SqMuTKq_Dytxo{jps&x~{ISskPV|THXM7KP8s^QMn@CA5A6@X;8U5ARwT;!fJc6 zh|#F7HYVuVf#Dqc9I!!r#%-f$;>bm}_Fn)z;Eu>pB)VXs?|5RuZpHF=WeK+4wE>H( zWO?j?H9|zE593k&_|a50nz|~2jB9cKJ4N8axa@lbcp`s{Od2UJE)H;7m)B`>ExQ(? 
zr{jMG7>IVqm$bCD1{W8zmJ}6L_4dkGTUb<%j5Ej;+mRCbi<;dy@+r6YzWVALxxy74i61E`>65o@O1ek*ZKvqEL*07 zEG?@r=gh=hw3U3+ecXcFKb)xi1^EMb7}{N_&rC#T_PTr{Dl566i(RgC%s!^+1li`Y zo!=yXKNXZ4@*l!a<1-(b@!D@O?~A5s%6@8wZ0?8vVi6UX>4YD3V z_8X-$+(o#SI5$>m&<8=uyyoWlvRaqg3$K?0!JK6CQz1uRan}0r5(x-`wgn#EmzHrk zq=R2yUlsJoS22@!Dp8HSi`<)zJsY&%FeBC6IOubB35>#Fxmg9U3U0<>CJ2eDtXlF#z%<@=bE6~hxkW{d- zDeWX`qoi_nZyfUY{d|OLMd6shZE5w|;Smu~LjQ zw=|>Clb9OY(0X$;h!`?gCtHlCl{HJZ^kYpR2r3ACYLO0n%sXesHZU-sZOuKFbgGHp zyru*NL_l;o{zc+)+R$cvyO2!I`oMmHJ{05QU0 zr_Q$a@^~ZWL>fME$A^@fd0Z|{d+rG^pA?MYXGyXrjXa6b$gM0^bsOh%#GHo&*i&stpjnk9Wz(3 z){P~}WX-Fomy6d*P_laA0|W2b*x8+d(k8084wb*UKXZ2p+urC=g>@QH6Q@<#b7x>C z5gIHc+%f-Qlm?9cJOO*HKeEXgHf{6VRFW22e4vSTZYudz8EK!YBk{Zt9lY~>z3m0~ zcUODqYGrxX72aG8D=Ta1BUE;w%Fs8{?g<9O%6$|PRRw`v)kAzOO>94D7r*;5{$-!d zKb;M;Y4s(0-#neNQd5xQ&U%!ND^(&t!mQ5Q_wTC{5}rV5`yz#lV|+MD?+8(nxeOo~ z8PsenEcSfv)kEuigz%F;Q9aTWnR#$!`BC{!fiY==3w#;|0nM@V@Hjl9OFO%zKoidC66@f1W)`_>5m>~1Wgnu z&9(Ym>lWa#N#IUvwt6?D6aD9qclVoXa;t!6tc%(9MpEq23V>#7d~Y*WT6spD6{y`E zFAkm#j*K{@#)=5f+Q?{!D=5i9Ccl%*bx=OpG?zjaPkBE&#dFD%Rl}0_)Y3EeBWBU zRa~k}4*R7vSWcAMQ5sn0tqNG4ac249} zVd7ks=&v`%TI&Nmbm@Xc=Dopfwkm#K9;kbO9g(3{uYanw9yIpze(_5Ih$hgZHZd^) zme}mg#>xV@sL#T^A57u^zqFpNA9*qIvDeU)gXlsyU{vCMzIL_R+gyZm&Q7CLb7j`4 zZO`XQ;6Z`v(#t6;8y3I2Fma1ny^UgvUpt^Bqo5F)cQd#akVGk+aFn5g44at)VI4u3 zdYMUlX!9L8b2YzBOW%>klm;EfsgU|NmbPWLU0&tv<4Z}Zjw$B+TO>V6ZU>=Jx_uQN zsU-$U=(2HldA=~S>gQsG=;BhghEj;9&t zf+qi(%F4<(cMG3cbAWNBY8IR9tURmnW6C(# z_%&mOe)3>$^n1XMe8tRPl)Nc~su2&Z{*|=e<2Y%d-eE!^ccf^NB4vKscS?IsxU3NRZ4rIwi8&F0YnG6q#xNUjYQUKJ9q@*&PaUC~7g&Sq9%k!vMuE=;pE+GF$F-38o<06zNEHBG*OidjnB!XoR+IRU%5VYmQK z{lXFoYM`zN_1x+&E9YbiNIYA||JPBEwbu zeY;Z@EQza4#<>Qk0_*w94g%k0%1RGEDjY4%8L6e&*=_Y`$WkqwQ9kZ7;d>rJfRA<{ zM}LZ~<%=rLgBcvDK4NaXlVI#suYA|edBs)k9+U!u_WQpOjw3Q)?AN`m@M)HW$$Rpf zUn9FBy@Y9X9;4Dpze&-Y*VZ&NM|v38N(!ZVm(m}T)bfZ!)g66XH*x|8)v;6lw`tb)Cjj3 z+K!J-z5T$0B}A*5r9>^!!3+i^xoFo6Rx`O$`&HWxN|bI}c2K_Pu054@v~;PR%8AMXiF4V)ewlR}H;vjCcd^xH+A0NNE=g`yB)0>z(WZ-)V zOb!WQv={C8cze~+WoqJ0zx|6IjMx5n-U}>!<$Al2;-mLu^*u#<)~qEC5?THo!G}p12|EK-6ntj& z2zEK6x$SOh&H)#~ePt%(die7V8>f^{#dj<)U9gU^<2R;feXHhyA;IX37w&=ZN}5>W z{HO;`7u&9ZrWsu&79RA5RZb<^h=Y^0_FJH_>2CV=K$T_Dgw5W3LsD9rXB+kPfXqoU z&x`iz%-fGPgFp$AIR}r-%8Kw&(cBgqH~B1Qni{m;SV>~uzC+r+R9(??!NsNe(BR;D zuX)YliKwrt_x;kq)IJD|vSyf9k&*0l_AH6CDOx^=u%9I*DZ%uGg9B+)N)-2v9ta#` zI?45qz(hojf(*H|r}vce3QJVWC1|bwdBi~-xcq=F81arAiC4dCEGdz$o$u^vty1J$8b=D`U$Op82 z7&iDd%Q+F_zA+{R1bBi!eMhszG~IOMpXGPThs!*66y+U%rt{JC)1|wKwz>Qv|E$>6 zEy@H2BHA0VEmyz)Ml`wLLo-opP**(5%*I|0VGAK~&CBVgOrKpS9O58PqzLZSHA|pY zoan;kx51%=C@~K9HaKnUx%qw{eqSp@0aliZgv5?LK0ZBmkU}nZEDY{L^?6_8^8hyt z2n_-60Gn0R@ZRNt*9 zmJR_vqF>IFnpVPg&VQ&9x!m^z<%x302jzP?n?gdKOwg}NAS6ulsDgkUv@AG((Yz%F!YJn0g zv#D-(uU-V}RTc;**;G>#O{T#3(RJ06l(LG?4^O$mIrekX*k$U}m8q`1dc;&mRT?}z znqo)6(x(~9BS()J9T~E7SGvJ3Akd`M;&|*h?%*|B?O1IiweGD`X+evEh+M3Ifka@} zZL`(=gRm7z5|AItY+YzwDYxOYd-4P9ipn>|-mhtRyguRtB%j;X42=qLv?MDxtZLU9#J$dT=Mb(L-Fk9#M7i8Dw*C-Aa3hhZT-{? 
zYb{Yj7Xxo`RmN!*Ut}XyDk@+m+qZfYpHAFDKfW*Ze9{5nrkKVt^tSHPf#LD#p-g)q zB_GB*>!dnj*snaZU+v|9cNPw9&zgz&RrPZkya2CH<6B$pRVt(dU^wK7Kjfd~`V?!p z?vRp_YTMeDXNtK^lefQg>##`=d&w)@*~!?YM`L({hK`I|0N1!?K)BIz?TCA*Ph=D6QDh3^5a8<|^v(|c-x~4w_EfzmbTFl!7goaWe zX>DC1hh=|x1a$K=k>tJps269sV%Z(I~!`rn_ievTuRt$5Man*UQzxlvmN{Q6Sv;+9Kym!`+bUOnH5QetZbv3f?F zD&z$byT3UGP+>d3{CI!^32m(ovG?C4eYF@wtcp9^AWAKtmHbfS|*4*CC1Ht>b%d+ol&!N;dPJr@{dxhA4MAIYSf z4Y)O!`4&k5vA=UpkG2e-_&`h;T-w2m>Pl$Ug_tKRNj7&H&3b^R>YBPB+33vLuD(%Y zt80$4pK@F}-rmg)@0Y)pqTxNE#(e4sK~hBZ_|=U)D3opDg6A?l4l^r(R@6*UsXt|` zsZ;1)elmrOvCVGL^zi;!dO0m+UBv^eWrda9Bbh_hoor&K8Oa8Vdl-}4I?EO#7E8kz z3b?{8&Io*xl9K3ypGsmjJ0!%%j=-ZtsziF+XJ7;u|I?J*k5PuBhSgOy6E|Z>tcI9^W^prv$`ln@Q@evzFlLV564R3r7UFFagE)jFAiM@ z6Z%@(rM4EpKa-!xR$0#PhnCm$jG~^i=HI$=9CQ2Y&xvmnG*y?Dc^T=%ou(N~OhAl8 zfk1Xm!>S9u_&~fV1erztVxAYq#wP4_wo`@RvQ7Rt`5jqNZ%>AXYfNP#<2nO$)#06H3tWkuX+ugDpLie^b*kN ziXSym)?_-G7P07<=q$uX?fT)fjR=G*k8V5P3s{&Mg9{!FNdhp zg`D!!7sXjvEK>GvC-zCRZ-sX0z!f`lb)!ehoX;2x*|1SnKjbrMAJzQMVozJ0LOwbf z@I>Hd`hWm3L$lu_(TXL-g%Dy{rMaHc2!Q#S?vN)h8E^(yphXNWlIv1bQXwr?KpxOf zwtJE~p{}X}l2hk|aqUI=qOOo@#8{;*rE=tN`5;X}rP($f0* z_2FTAAS}{rjU0S!O+kf{wyyNt1^rlkUGk z>vnhJ0oCaFZrI$$Lx^(yt6}>NFgtR!8WRS)hXDrV`>(|>yE#`HQ%I>yEFByGVlRID zPUp!bH-4PqGaIT*Ng+QzKc~x4f|~t_)ul*au_M~# zx=HqA@VdwsZrIfvFMjQ7Ss$JE~D*p;e`m;mgzMW1Fv*e<>y%NybuXQ!Z+NBx* zSS{>~amG41{XoO%$6zi~t%C0O)`)P(-@~kMR(*L)HnTldqNm04F)=5gdQ~2IXC5Jh zlA537^|};=h~ZhJ zS|ENkd{y?~P)3~XT5K?l9#f&6m67BdFK`&-5wrk^VCk zR+)t}o%Ael8628kkt|@wed;RJe|r%(n}9Hcc%z4G4sU?y(7f+ciKTLS+oHnifmb}6 z7ivTLZDzxi$>i27W-cG;kC626sU7;{zvkuTxn3L?iFlqAmgzN3Lu_EipUFWR8yf4&GABf{==7VHhVjkNj&Dl`L2{LeM^2{X1^fD)On(w5F5}zc&);9#293#QHRU>?9dvNNHqm3h569u3RS&_2u zr8D1*89Bsxdh-hkv}T8Ds(hq2Hy_+Hk~?5J36NkCqMkD9SlV68W~CC^VzuPCt5fc0 zT$5*J|IBeB>U$ujC>J*(G8js;MZ2XBN0hU1l2uPtZ^atPi!##oIaifeu{VY5Lb!J zS4UF$%-Px42G#t@E9*R*l*~yAg2x}Qhv74vgREXJYN~Qr>~PVhfyWw29dDsc4cAVN zIUo)pRm1LYPb+qh@~FLb*G?fW$5uFa*fp`sl4#RWRJ*vs;twvh#DYl3>RDUDDgIYwD#MY5HT6cQ3F3KGXbiigI@{ zY%LFz6p^~kZZAu(D{X1%qTJpForhi9Kry{`nwBohZ+FZkz|Z921D3n8s<#^myw`gg z!PQ!6aKB_=qO1@Fnmx+kFldarS*C50VR_^I}5CSMS?*a%}oq{I|4W(Q}K7wum&b8(TpE^dD`+t%Z;I zffp$e0V!zP5Yr{+V~=Na$!E7)Xym~+iw1YbYj3j>$Vn_;^j}H1T{+7sDfl$vZR`EB zquv^l<8T*YLF0usc5g?;Ytxd{(*^umho$IlGK8FL5aA%G^&GL(&fHPSUK>va=5uq{ z=ejxe^;%^ehkMp`r89(-yf8Z5*9^Lj2nSL`h6Vhn_FVeS7T0IH;Lgrht1Og?t9{ox zW@hUtLgdsxq_JPsb zJ-F-cES{I&M)KJPfdsf_3%nCTRyVT)19Pym4*{Q3bcAiXW`e)kE8}P1O57Zv!Z)qz zhJX<^ioNVOF^}qEE0^PZKh63*G`7iyWrXH7?b9+ht33-^lFgpcFSX{$yq)II;dtqi z#@5%|(jps$XWnXQYg=uSGJTP&M+cjN>p?sQWpt_?KYQdx^Gyv}Pg0lQzkbjtG#}Ud zz#%SfZaNBTWj>L-HuoEtdK3@@sELS(03TG^56=pQbD=4TaGLKK8Ibr@(+B_n)1(~Q zztw@SXnV-~@fA>Vb^UKY>IyOPTo=#I&g%MkzkeUz)P_*MV;qrb7K2c-Ah6ZGXc*>x zn;pmLDa6q?lp{lE;#({|Colg=M=~6tT>fK0py4D|CBo%OhZM6U?0TlATuDGu;!;v) zH^-xMp?Flf^FIwou)a$E_HC`v+%j6#cW|g`Ii1#lxb5rDK|tv`u=>7G+9=+Ab7znu zB5d*_D_Zw>&GUR*zu)k?>tkhOTettC=e<|ATd<6Z19wrCNv(ELAbi1uB@Z;`8 zt6U>U?E1PHBI|A+&`)Q?>XmNar|$_W;n1t?W>!qwennUi*8=-BXH%XcCpyx{2HQ4cebL(ZJ4IM8L4-PHm z%3XU67eu%g4`aJ0ITS%TuT4icT;1;p)3bT#;3x^^eC!;c#>>mAD2kXW*BpfIZT1K# z!igZ872%n2rD0$i4N!{7Zt>XRd&3i~%It9X-a?a}pD5<1BicIWtq*Q~IM~<}Vwfe2 zmomtZVE_Qo*nW~p=WXa_5Axm!8HV6bd{OF^m-)|i596$3Kc{V{TxwkjKR;^|^%&TW z)jS-gyX|kizuj;0yf>V_CHXWzszD*E>$pu8&1o)E^8k(~bgP^OpirpH#L2|%P{A+) z7s#G`YI{`z)TU7HzAkm8mdOB&(lG( zl|$2|x|3XUeqW+=Tc)Fmo`{rDz+l@{X=Sx#tM3-iW}xm~3O$#{)nl>rk&g-9gnr2G zYjw@J3+S7KxeTYJX_w|h#(m@+jU(SMrn3x=U=#NeOv?JLy22cwQ7;rKOftD?rO0Qp zKNCNnc~+W*S$7uA$;QrFw>^u2gM;_V?5iQp!MRJxz@6PQ-*05xIaBDU4Y*AU0GI72 z$pPN%my3vm%>b)a>9^vMX8YsAK&iL(NaYQN8;7rBrBA+b;b&Eio@G}zh5Fz})L2^b 
zMe>C{V`j!5r#)TjOG-#ML9+##!?ySehM%|30b_?WGk7)`6|VOSYnUGVlIQ^IfZ|{# zz{K_Um;(+zR`0Q6&G7r|-GQpYLYC1%nVV6$WG)y&6DzI%-d}9RIxNuMr(UKLfO`+d z+?}~7?W--Sg0T*=mZG}^+3EI1IjwQPafo3*5X*8dFFZ|7U*046)g8fna>%ABQhXA? z&*s}-uI{exP6yo0KZl8DKc$GB)?BivReL$Y#>+hL>Ht^BW5Cq>?AaW}C+OHNf?adw zF6f;%`$3r%vNq3ts6w|b_A86SS*zN6z%^QxU6*Vh}&V+nAcw1*SG+a6KCY6-BRI;p9pOJCx^t2!2M=R@@ z3XQ1y7!U_2viFdwpyHf!#N=c9Y46I0_<;@Iy^FJR{qk+a95f3sfL)$p{PXGACC_pk z-`Au4_peV}Jbms3%+Ad6C(6q?pDrD4*f^9EmY}ld`*t5E zajvA7S+f|^2xI#M%g;tQn?84cIpi5ndQ5DxdbNrgIu6*&;hYNr6e*T2sZb-v<29*q_^S#I#(EowG!TS==ZEvkCoefl~qOotyM=gPQ! z`lFZtL9U(k{r*5L9Ll3(m#jCyRjG$bp!sxmRo~}S%?lfZR9b?8hIZnmGrt=BDxg0m z-eX`1{L=}Apgx&XuhThc*p`1iUm-n`P1fW{rXp;ipnGZ#5!s%S)w>^lYwF`!4nG$p z)gJm*Wdq|y11eJU5r8*F&F4C94K&q&MIH(i=0;C_AFUcYl2Q+~&SR z)pl9(?OXr6QTf_PAS^49(Y4tkaB|t#hQUU=6ei}p+1Xla5OaP%)s`H9vLO{% z{H=PPGM$Rhf+o_YKQ$>yTw(N%@%FdX{R48A^hb8t7|p87D2;H(wBEQUv_x|-88~WK`Nz;0jIU0P>ubf_ISe~b-bl7a#?&p|al`dchTRTqPiBKZY z_O-ne@Xd!Wzn0}gRcR*txVFD%Zr)uuY`~JPYwO9*m)__RsJZh%YF~1*|^`cdc1A`#ceClmlYwU2&J z(Ts?QenoE)i4%U^i?@YNAE#=*{3%bP%f`*WejI0V#r+-+}B;nCZ5Jus)0 zT-IsWo!01y(QXb}PU*CCFJk4qW(CZPZJ`w;N}7l7~SK0*wd=_jV40XaB0a7vH7@@~L{e?Oae zYRUJ28TW7w?s`vWs!R|Hno1B);j{ewe9C>h+^3&*?mEOf=oGUA&AeRwOr!ABk}@7! z7X7fW&x$jh82cfnDF@PH#78Sx?YT5cB{iWj?7QMc;v=Gd0 z?{~MX>SforK^n45Umig=T%~rgC=*JsFR$+Lhuu?pzqcwatQq})+4wPJ(6gDEyCkILbrX&4dHGZ3qOj8ORSwPuHudV zEjOp%S#xH|c#pJC(?Mq7Yt-brTV1}V7$KuY8e?8IS@SVMN^;4NW5{gaYOpA`V7J9h zqUIPYS&!88pnV%%H+9VEb?SV%I&D)(#(`y|`Lw}q~g>12jpBp1J%BGTs1P4@= zk|Y@Pb+r&JRa6upw`AOC@s>O3+w9D}KnA+n(V$C_Ju1F$yj0s{**k8EaDlQ~^oWgI zed6CJ(wI(u7h0m`x~U$J@0H!GvB+}-9j`@|o(dWm^_vY2HRzJZwjSS#CHVm6h`v15 z&B`#O!1|J1XR+v?se0aVHL8lOD{UwQ)*xGM-d#BToGF$G9JTWp$?|i^4S6al@uZ{W zc0tkC^j<~epy#PuCSSa(qt%x({WhYduIF?oo4jeOo;I?NT?qFs(KW(tdP}zeZg&|^ z@uvNXp)YmX3Z2Y!Jpsi5&+7%p&%#ex`^DN`l2H$lt+?e#$77k*XpPY zI4)ZAA#Ch;KbNCE?|!Y#BVs62Hz?9pKC;@R55KPU{@HYRqoDlDNZ?DkAq36MkMF9NYoVrJ&koTxE%&6+`4;=9Jejzv2^;QCc0F8^^wV zb<;2CZ`7JR#69cpf3J#lv3R3qGnlSaEXtPcd+-< z*kdwkA+K5fbP=D%5H$2AQ;B$f!R@EQx8Ls31`es?LY{rusL*vl_5>B~``FE?d-==( z+(2hb&+!(LY@e!^;kY0cvf&SMuva&KdzH@TgJ6$hb12;0CZ+7u{BiN=<}i;2*P@(u z&ULaGoDvNfmAmV2$xq|0+*FP@dr-Ch;AXFT-#PHTzgV4Ph*+B=N%gBfP4cYt7Uj`# z-bVEICB-{VxYJFVA0y9h1tU7ZJl89i8KRotbHtAmv&(tZLxn)9>gq{ULV6!R{+7{4 zsAr1`m|aD}ViVYB6S7u+A`ymy|9RbrqUp-(SQ4;wWvnqqlzb%1`Yp$k#H*?rYSDwd zmg_x{Y1pF0An9n;rBeYT_sh<2xt*9%0Ue*FUgD_-8~&DSc%vRYvbQMb`>0v{~<8eU}ys^mnYl*c8wrCv|lgY=g zGGjd_01JH?p;uW^K-_rVO7MFrHL46%^>lI)K;M4xY4`SbWtmQ-ryPuLKv?#Kr zIlmtNWYFWy+2H+g^zKZ{((Z9+Z3O77ls`QvvTzYAW9-NVW$3NXg=W%V(k`~PX0rtV z#eSn{sC^W28h5-WBd>H9Cb3)Vc7MbpOw`4Jah?g3i;W|Mx#fl~d(DwG3~UQ~_rZ5; zrb$Fxw~4BsFweNPUF+#II%^pK_}C4a5cFL`JUW|yBtMTyhheYQ0c-IQ9-YqGYNe>q z?28AQb4%aevGG!^R&j_lk=LLIG+*q{CjT@vE}U*mr3HBI3l`r{Gl$18}`K`hK84M%bB_U`UN1YDrskYV=*tFY|VLgYl4 zox5#RnV0WEr3Ha0;5R^2DO<#41fK!SjSA7I&{^Tt$Na{*Nw>U2hwC$of$4M9q!xo* zlTaZWw&madkkxYDN3c8e!-=y!K`I}I@O6v*VIX04 z5k$#?@plg{r=3(w$OdQwA=#ZA@}2z}?d^cN3*1#6sZ?7NCQ2uy+#&bOa(1ongN|HW9WH9kU_+~k@s=H6sLQFjMISfeL z;bnC@$!#}zgQWxQ>JXQSEfrXlmahZwTePt0g6UEYS-xj!E!8_DJ~#Q(cz6Z_lg2GovvX(^D>}c!(qQf60-4S z;QhInm;>h^q};$9&+{wgKmQlNlJDW02x@5_>Rk&=(XgCja5ATHdp8MLLNln#^Lo6b zqJh^ar@U&VS`UMvZbhhw(j`GDhPp}#pRUS9j&$w{IEYv_^z&6vVC;!PK!4YoH{PW0 zBmkg=kfR4V*ub3K4fB#-?FeaXeme^Jbx_qq$Vw0@ZYC0N#bDr5t|%({vFN^vhgc5X zgfn9@mL(%`P7DDc)_>zRA{FGVitS<1YPVke!hu!}l%lf5j%1(L$pXX3Pco^y32si} zea@EJuA}tYrmb`gG(l`hL*Uq0Nu1u3Z$Y)r^p*7#9sMq+3Y_kBU#r%>#&EKhR}Omg zU}=^MU~pmN2Bm7Lnimv^pJ+=+(BX7X)**gIl=OrSBo*Y&o|S22@4^xNoDP-=9^Gg# z;9GVZ57e@)Ir;sLSd0sy9}jg7-m-F*Ro5+E_WRzCa9;0OkIuoDd^^xxWWVx#k?P%< 
zY)H|uxW5{#+X;$kt=yRAs*QptHRSAhfD70jH_(lcS45q=d9Md{w8U-&BQH0L>%03- zRgR*~6NLHs5#Mm$pU=_3=Jxlb){C(s-f$m<1CR9%t_|@+>)WJ#E>PR#6ox^r%l$%g zr-y{{YP5ECt(W=fq!5C1ilqNT(^-QC>-0S9ArjBXe(#-4q?zvKB2cI-ax`#R5eye`p@qjuCjHTWv>n&`%QWT#(b zPJl2{;WlYX_rLT8&ddnLBhD6UbH z7}1MIp-z?^`mP5coZE)*CV$ajt}<34VN9;T>+Zc=#Y2(+=L|(9SaeJJ>e6^ViO`># z`cjYTDf_SptKiu#nP%B#Ew5;pxR*R!$KOfXJt$zT4;-#}+2DF15&ME>LsiJ3f7AE= z{AbbJPFI6817G%B)rC$>4G=q_DYEUyrCckzu&?H%Wkli_&>Y-P#3dp25wC_!QZ^e2 z8}vI?2t7hO@^#0y%=e*z;%<&14|_ zT~VR6cEgwhxiDyl8`sR(RAc%Dlb~sCuAUdM8mp(q-qLok7P~H#npOX#doV4gkp}1-@9H3)SD3d%U?yr)G<4@rc=!(LVOG_h zn0acFr;@jHM;@G(L(l$|^Y^kO*JDO?scn_4%^F>6^KP&BGMxFFsm=asYD54crRTo~ z-ClX(tXXR~%nH1S^8+MtHhtbk&dD{>>Tc7@ypyB$5AWm0%@l)Uj~0V$tm*TNJ|;Vt zRaH%z;Nr$8h6=mamDCT6G#i+GuIsoaSB79g2{6~UXncC5o%YPXI!{%ec6fc5&OWRD=$I6b7zr8aw$>u#wt6A$?ihzT zX~cbF!RUnoUYch1?&7hiV7%^Vv~EscV9yuu_3P)PrSD$UH?_Ax*Bf4>UEb|rP!_iN9*Vj}N{M@FX_hvHk+j6sSNaRy9n~PCiA_}kso@wo8Z<`GaXPe{3 zm*E{#hLy&1=t5H7yp0+v^iKl#GL%stexVhPo+KwZW`wm=vKEsA-MXc8x9c|UGqQf+ zn$}}gv1!v*<@swZx;V#293Xjij=l9vZ~t@=v<#0^-C#^slJCaFr~I9wm}{}O-Fyu$ zM-OpXy`CAXrF$_8RtJQTRt;uGojkaCi*{cJ)( z#6wVudq|q=Y&C9P(tO~hsQu6X*<`qwg4N1IZtDQxX~h_##Sf{>7<22Vf>n?ypC3}` zf$&&c4}x+33Zg3ux1cR;aivso?h;!rMH)das9rdJUX3FGxp8FoQ+mcI%s(J<423I zzaIBq`05+;EO<*2*U$3kWe?x);o%Vy2#q0`mGcH%%(D0A{Wfec8x|Y`uKyUlWlJ&tJ$`IvjKaB!(KIN z>;PTN7&E2gc$UL|O&h5Hw3tuuQGd!*XFWX`H;u6m^E2TwlSKl6kOl)zpchxL%|KpcUv+H zQG~o^hLvhz``U1<^r%1dRU*uM%i&22>k|AZ~pYFGwezBFUo{k}dBQja~H$Xyu7 zmz@}!O6Y33+2Tdt?@lJ6c{Ci>WZdSo+4HG^tXk>|Q(M7He1gYf4EV{+$6)c!t_iE* z+UK}3Q+P^ooJk&ijA8?RvWaeUI9kL`{Uhscc}oXgZzlkF!V_ixP{0$p&$45c0y)$2>OQI+SaF(^x`%5YLs=u|k< zCN9cLso+{S`G0=wJ+at-Gjs>VqBwm~hUu6C zRO5Wn#b{E1&65bfXqY!cbONLn+M(!(6lLHla4&q!HGP7;8EypbZOtYs0Tq#rlMjQD zmlo-pjM#Q8+-$qamPCd1jOoFcPKcubhe~KfPkDS!Ft$;_8v6n|IZc3ul=k zuQ>7sy9g?7_xW;j!xlD1RF9t6r;6|kHTo?tIDva^SR$G`&vwtge^%Yt9ugzWN3X~Z zENlw)i}0MH$70%WU+i?#n>hP20g)lJv}?1r$hxN@T~coMdKV@Lp~l#v0A}M%oT>#Y zazJ9q#IpJhR{2TVLw~T+^&mm5dv-L^vh`p>XT<#{ zVx6#y0g=T{X1A_~`VKz1n9W|OE41VJ1cK9~&8X7cK3iI-{+0dAn%oL~H}vP}#&DZ5 z=Y(P}xR?)oI@c6D?b1x$L|U!8Rr7IjzSh5k@pkDLju{E~OqO_5Ir#6)4_fWXs+f+r zqqJNwR$8WZOVm#Zx=a>f6YJU;V5Y%N2dI{SzGW z!Isa9dR&I6n#hsIDDWJxoAkvUx)b|}bxvY3Pib-i=j?&qJ8xzyg?Jy15XlwtME;VA zXyDalzJ10{5K;#3_%Bye&EHj%MljPdmDGyvZNI#0AxgTwad)REcm=Z?T+of zTUWUwaPZD=ziB^^2EYBR-7oYYQjS39NOj}yH!rgm{MuhaCLcgs<25G6ozBkgK)=kl zDhuf5*_jy=VF-^5aQ*}MEJa4>z>@1M5~@aFe;BZ|DTxwBIVneiR`;&%Unu(exTLhd{dyg14a+epc^?+2nS6jdq7neos_b46(At>Rs#56fL*-l$VI z1X&m4!=#ATwzeS0?#-(xyI<~e=AB%Hy3)EpYuWP@uC>N6c4~x&wN9%p&8GqN#mcj1 z=A*DvrsftW`&+aTgCcgR-H3D5$(HAk?%D(a`{KW}RqeULpBRY+fBS^)ak6myDDtf8 zx)poIxX^xP#~9W2l{q^gp(s>xG5=_)bhk|G?m-@w&oJgDzwO3Srg)K%NG=2H<1#|YSz(WwCY9E?+?fRd&oo*k4H=JnJGSx8^ zo}XCj1ckeU(Pj&s;Lg#Z5#KXh!niptd`M9g5+riO3i*`RAFg(I=X<0)0UCfEqi%=e zM$9?=bhjF=_xlb|iX`W$WC%xSdvmc;!os?=;(`}J>ahaUMR5U%JbMZWxhazPv~XhX zN6nFq#6S)_6>j4+ggH19qn+}uOgj<>2g8mew1jVqRWQTlBQdx9!&4XqORF|RS=Q>} z%CD%#jSc(k$gvV;X6B4)343pz?d=Z3mE6W44yg-Mw z!@V-QH-QT^ZskgiwuO%e2aN9%<0~F2LytuF6e&`J{ZMY`^+TmY^w>L_^?UYG%Pq5x zE9kLL4uHKr`+HHwn)R`fCj*xe!PtAkw%8~Qd5(%FM7lT&s{3#Ye_$Y$PGuP-!r0@1d zK)DlIQ5UlAjr|Nu@WXsV!6l9}5in0h_jVN~Fe>#zrWy!wF;aQCEXLR%=nuD6L_g0P z6o%NXgnH%8F-+I$`YkF?HQf(C8~dCSY2BuIO{AMkJHNhrJ#4prz5e%XzKpeYJPO%? 
zltnae%608snX+73Hgzgi)*Z7E#0NI?8ZDRVuNgNfRh)13puE6W>`Hk!!Ld2rVr5m_ zUKETe1YSzoha_m^tEl9WTaBWc4pxFe3b-H88PtHB5WkE@Uo^p5Txb{eu0^`pkC1-I z?oboA=)7Urc^}nr^Hrs~VjLP8KbJ?+*@@s3LGVFQVd&is5>(^pdh@z@bDq&jFvMr4 z#-V$4Pl6kijc8n2*z8dJ$QgLKv5NIlP;^2}ShyylyMX7u7i}pF!NMxpFnp^)*qJFx zh(i+kbko`H*xU#;ZtaOhAQUPL3#F8*4#LZN)kc?fA0WcMfZ`t=wIly>}D$?M|& zelb${cqQl>S$XKEyFicngk8skFRb_st#9@~*LwCop^FQ#ge`BuwwY$1gO(2oGHA&O z(!jUtoC~_4r*w}D&yPB(Z?c31Xs%s6ARBqIKXdk`3N(OF2!y2gVh`3j@Rk2pb7Blr zh5%=t{qi}>fASubyDM7F(RXD8w=hxF%~jb%y1d_<@y4_BHszsGyxtVWHpaJt#_#Tm zm#Z12%lin7p^=E%3=}FT@4rQ6K8=+ntDG`Zv$DW0gPv0)Sg)+Yxhe}zeD+K=&?BjH z)G#ND%_$r&^k2M|A$zV-c})!iUcrSwm_*{fcgxBf!&@f8olmMjuojw?p7v|k+k`Ii z>Z-|nbacqY6PDvC+Hs0LgDWACKf~1wjC4C3TGv%(mL|YW`uf8o6I8p_MtXFWn|)yPS0Sb~UFTvMUIxBk3!bHl3u{?k<^%qW8(tT#=d$dGe2Yf?$iXdAN= zPo?>SkfiWg*6q{MRylcnQ6J3d z@G7`Z_?I2Mx4k-D_Od+KE}AO6n+{Il#{=TW=IXs%3=9vu!*Q$-kItGAm+G=MxEJj< zX0u!0cf-FyL74-cbN)VC#hn=6i|3>RfP_VK(Lu1z0=R-56FNx%^$8|XNl%N;%v79Mtxup!@Tw}etvTbSWE@^ zi@QPatp=_*`FdGKR%^LC{AuUyrl_!##N3kqS!)ALcc&|b<1x-O!_dYc!jXf$RvZn_ z(TS#SpC!|Czq)E>lxb4kVBV25r}=@*OoPVf`YY9z&Kkm6@;7~F99oj9zdS2pBi)J& zkN)AmMAa_|Hh$6c$&Mz+$;Nl;a|h*Lp@n$< z`-v<3-=T6O)S%Jrf>2Cb&5-V2eZ=#K#ZZX_&sQIgF-|tfuTFh|`z}5&)6hyN0V$!G zB5P?jUqY#T+4EfihXp_1E_dl#DUO1wOsp7}o`0ANYoSTZ3omnf_UU^q{t3%R zb&J^Wvi)N+Dl#uYl+wMyE|;*%)oW+f_BdojZYV-zF9Ojx#6LswR&=)=984M6G2d(+ zzgN~$Ghq5w_d~Bw-*SQO^lba>TQC(Vg$FX#jV-JT1aD0k30gInc`HBlryFQsG|QEL z`y|wFwjGjpWN1E!fQ7UwSC`ewB~rd{w3dpzzG`2+I|&-Pp^cmg!tdl`LdEmm9==>N zC;T}vgbDOmhCHUp#A}iI^QWX?AH-kc{EyEvDs^n>A*RsI03 z~MAXJQQrM-VTq1BzU#uEXQe zzfZK%M$?dw#l$At|4W`O(lL9FqJ3>`e=7L3xof@r4qYOrb&XkAa zEOJ7=qjg*whCv2gUPrYOH`s|Ys7oY_?8u$Xqnr7kf1>7_H(twhmQJPv68HL~g-KQD__6e~+rQ+v)8@>eZ@K9y~!;rPdIEQt+q*Q-u$KYeQNKm$Sq3 z=^S1=zx-riFyh)N5xC6chrgo&D)* zpBvU5vfpdZo`^PDnE4QY<>!`y-Ac5hP`_sX-0pjEF|2m2;iGl--n0&OC3|LasGN=@a^j=sSv z;Xrp2gug&*t*UaUK~P@RS+TES5%J~EmAhiP!G+IUqBIOSyz)ln>e`k|6<@-BaPUQI z=UILG0_hc8XNFy&(v~%&{7+2<1E!VzN&#-|k$bFoW*%ju(&IV79pHMry5<>hY`i@+ zITPpdV`}$!tUoOrGInn-(J6-ONj%4FcaZ70F>^bBsEze}S}o9_+2L@tq|5;0Z5DvdZP zbbmrPl8m`&*ohyKKH0?;tDY-G3@c`I-RVYo!OH06%pDnC?)#WxWkXYgFZH|C;X}8Uo>l?lY3n{| zS5*E%0j=GFZMrYI=k{o!i|tU>#$Klzc|`PA!6%EMr%XLnl!rX9CGw*}ia7U3^2FTb z9S?-dRWsz%;6RWeftCx)lp&!c%j6i{d$zbEsmkbA!5oz&RBb<)JFS9sj-ot?65_*u zd&U@TU_ME=xwTB*y^TINRSu7Zg$Q*Pw?SNvGux<+@_5XHLa!{(ZXe|<@vsc&Pfk2L zS|V2pP;m>>5T5WOIAlkp@6EY`U3ma5qz!bRFKOYkPaB)^x8Kju`$Jb`PP)&Qg3veU z6!K$^A+!e31a_g-cYZeiZJ_45@u}t>2s-?hJ3iubi ze{djbA;)Cel>z^iQ_MJO`;*Wr*G$^oUJS#I|#1P%Cx&Z|qlxU@wr?Zz> zc$BGz`Fc91m0F*jG1o1eb=VXBov8c7yyNe98{S8vXG_J-~mZwYcrR z%sYFXZ2<|nG`eK&A^F%l&S>M<)}QDko;s+zIi_`ZG}AD*RC28_34K(1m)`iXA~Ra|Sd7`P8p4|G}k$ zTOM-aH3%tukNt|nibU!acBi@M2%bmt3e$;Q2a=P1tpnd7!3~U-{x(UM)+@@N=aaAp z%Xj1Nl87+ld6+Hvz^wwW1e3AIr=S4UAQc&*^B$M8Fvo3?-92w2})ciR820e^=G7#(i; zzzVT)0;8vo7RKgrcmpF{959b-kCx3ekt&{iK7D`fT9cH&tRG~X5(Kf+^Z4M;FL7Q3 zud92I4S{_2xxpYuzy7_KesRGa;VjCP^6|B2=BWHgoDkWU6OjzxrHJMs5hdts9`iY3 zZZf9Jj?n$*S$ueJj!E>q5j1yLZIX)M!*e0fu$oz|XFYjXAz7ro=$1LI?*X!yw4;ys z!rV1-LmUZW`bEe53Ar}&C|x^aFf;4>eucI&`E*qGE2AWnE5r^PBWWuDohomaMV`7L zM0MM!s2C~1^!?}ut%4j2Yu0g)VsCmX4rPA)@e#3|`pVhWSgpqiK6xU=pt66f>Wur1 zT9vZr`i-`0OWKDhx4f}Wz@FX;1^9w*U|k`ewyS;kk#YmV4U z9QllAUrX{XYi(&~GcQ!f??`7XJ^OhtN|ngx(Zi`AknUP=Cz>{!Ex|$mrE8l}O*|oT z|Fo?B(sI)hMI6(>p_PHDjn;N<2eqP>bbe5pv6LL`;i`D zQ)A$I(|-s;PsI7TH|t>kgPZ3ZgY7A0>h17rZYEWy!2CB+F? 
z2xw~ae)B6a*5L^dJmZw(=HEQFaS_zx4NE}nQdVZ<$cak3V_(01UD27Jc6H?+seqRi zN!KNA=Is0@*XiZUPigI%5xCn09^2U15OdjmFDf4kWn#UATP-OIGy=w5NvKN>4E3zh z_-;j%e_vV?dHB6}5fwhgD(xx;@VVTdH*B!OmC%<(xHW(rUrF?Y&qKiU@S`7Q0xgIgT*eVH|`G?$W zj)&`zHW0q)i@E&_$$Z_CmlEdhw-=!I%SqF(8`SlUHn0P48h?_A7cf}TVro>-uMk#d z-jWWCZSO&Zxj4x(bRF$ z6rOL+3GidY%OQ9GmR@eU;0Pxq=AGKaI#ZI5)%{>BZ zTGCM4n*OQrcoADG#z8P$9V_2JM7cAB?`#<=@(9yb?fW_s-WXYq>_AS%V7Ig9es(N`M#s zyz&nutN24JfHb;~uc@w&FjhE8Vi~CD^Vi@Xtd+o&&F|_}W_WcDXI^6B-vxsA(LF>Y z(;7sRM0)$>8NAMTsv3AzsUeli)EiVY+Ux=ba=aZlZjnfHCe+JHwzS5O&$2?Tx;Tzv zm}#Zv?>itkhtq*}F1LpIk#?p{co*M|5R9lxr#<%Vg18*-zXT$L_USH1^}N;leX8_y z<{YYgs(kQ!hZOV>5nlN|pRnBP&zE|!tg-AJ!!cUSzk&f{hx%r>`L;Ti)ie(uK7o&z zDZ~pdid1tp+^fIv+wJHVPqDMzx9pX}Hzp|#>@QKh_I~4u$Kh7oGN8m~nVqh)o0UN3 zx^*|u9+fvQbCHH#kz^r{%~$+w$yX}6tZ|WNhj;1)1)P)oU8)Sst=!WzLPN?^xxp$-B|(md z{!T}r=392=7N1JrFURynMVleQHt~w1DTmJU=DWMwpv&E(%#wv2}Z;#zpcb^_w zVK1zJcGcUxuaskgIp1zQ3|Z;I+jE_sa+(#%#oz?m)|r_i)-u1yw#42ms|%hLjjqJ5 z=ZRrHYBOC9$wUNs6k%S8c4DbUqc##NcbCuG2Sct_Ne*G1ZurUAi*(V};q`3QjcCZ* zM!A*tu%Qclbv)7VaHzGzx2Fv|KHU>#O|VL^Ex>~jFF%CvspBwJTxLVn?;dsmh24qa zxgebspR&(Er+@gyxYg}Qp6?ZBS>L{)J1xVfBy&EhseMOj4ZOGoIbLkG#^d`^J6p{~ zn?U43cKKz@#ZITQm*h);PuKH()96;UYhPvJCn=4~cDDO0*1w5y92zj%4YpXW_) zTwzgGN6n8pU1v(QXEVM#Fw#9ZIU#1v@F3wT0`-)>f&?8pr>9K~t^ul8{#LhZzX62; zhJ5f#OI=urn*Xto0@eAu8wp+j+HC4qy^W{07T zo}(O+rYX_h$~!q;YbGPID_mvLf3$)-oboUw_T~om!phmip!}nUSRd^2w8@{y)Mii-Mg4 z>nS3&te#kwz1w7~;^7<6AZv1Kkv=8&U|E;Uzk$aqEC#ZZ*ZNd)EDBLi480+l(c=Al zvA;Wzz_Gvf9#Z|V(!Bk^h09BUdCXWi_E}4Q)tJ!VjfZA|rfuDmsaV5u`;@nsfVnS$ zsy`EM2i=$bfN($0|IBC(GMelEf_dR)Qzx{Cvw5AidI=JT6}!v56VzD$P{Lc%E81fo z4?o&th}~S$^4)r?DdBe;0IYzJTQUH>XN86P98Ar(7a>istX!vyFL&>hLsYmV4h-SN z=}*a$Tea2gO_Iaos!6F?m1f$HPqi3@Gm;u;56tpcG$oL10>r8!GJ15=f*4Mr$$z2m z&px#|6=0atnUMDh2YtV%)z@10#o~WYoaF#*M)=1PasOj4mlR6|eW9yjW92MA>Vu;? 
zCSMH7wQv4fH7+RqBmXgBUWxRP2gUW?jx- zyP>+J?B@O}Ne$ffe=3`GiH!bw5#V!A!SLa{9c<)D_iF8oUu497WylX%mlr=<60t$` z|0awJX0?sfV1NBu&e+$(9L8XU09jrJ#|nDVoYT9u8>+sr)0a#7#EGOWl{7g^@7*d*7yaLD&P9f0 z1?o=-4I13A9b@{O>FVMoSDI@X)W0did~)PH|EB$^G%_-C|L!_AH1kw?PiZ@cNk4z$ z9fXH4Lxa@ou+S?4h|ok4aNErmDYBI+@CJdtgbx+Zko0-wB>ftmoOvJ^yDHDyC9Uo_ zqUrOJjww_P=23I9{gFXnua4c-5AlN~dd#RC1S436GOttA<)SVZ6;DG_+%s?z|tw`;LYozfR_# z`+>+Jqbvg}@AJ43z%c1+z&GdO$;*|%fDU{vkT%;{6Bj+4V9BYiTIO<8vR9HD1l*T= zL4)s*9D*ll_s%ha9xyM6OZs9?_E(*x6GlBpE=+y8j&K#3rfYn?5E{9r7m8V(phSWU z6MWWAFcn{xD3!$TB~bD6xnXimE*=G4ff_+MyZ?~ z&^B?>FpE*C($XDgvEMIqU2v;n^9eHE0?0Vs`ZJa`+#_r9J!k;B!MfELMZ%=sAJidsFhySRBiX}$<@hyh*X8RsUfR6Qqx$u8cI4?hgt`3@4uNj&fI8O@yGlGmm zokP<@S>L{6{9q2K+vE0im6dZ25kg-H3|1$z^vvh|&^bs-WfbWlr6NbE<@uRw>A2nNFC#fFsxv{h*7{xV20j`RfMs(%P$}3#t5(4nNuJt}ijF zu&JzQAk5lz`d9UQq{T!S1wmWrAD8`E zrS}dsWW>C@*?vd93NuASmIE1Dt^x{u)UBQ*vt}bAn}imUqRmnX!b*k z$m{$C6Mxga{8$PCZa7(6JfgbQQ9{bHoz+Frw6*&UWQe=P-LUT;xgFMbS6_g!L4^Aq zKdh=lu!k7bVuV2{aG&JIS88solFssQp3vEQ6MxFd8i&y_+5_f z`2QWn!JIr@N>s7kI0PDSojx=i$9{PP9@>ed1|)-pc}^J*v3H}RZX;1YmWMqMxYN!(GDisX4oZ8K zL(zUxy-G}Trl=TnSCbE4r(!*LeUPHQeLl>C*dXhb0^}R9lW{8*zP>J~p*mU$np@GW zf3pa+%swpAVsndkvb1U0?~QkB<{F#EmlCH0S*k+V9W1^+pdlwD-;SMPnyfRvv@l%7 zcc1hcuoY!n{mQR zW)w0h27Sag4Z2Kf{!?se^b>qkvN!X1_?6ddPbHTsnaNAiTsmYLSCY&_>C>V=4W=i{ zEj-IBW?Il-hHNP_R_0Xrh*-QZqyO#Im)gU+wP;W0-Cs6th@od$vXl38fPT{)CJ&p^ zm}J;g>^wJGq2&4fRdIz)tt-~Hu{$gTKGj|Om-u-1?Zf+r6umuE6jboz4j4UHYegHD zEk=yOtWUYiocv(T`azMQ&7VO?5wh*_r>!1Eo6`W8S$1n4y`M17*7qm$6zVi_m;ld?47S{A>RK|F*$NK=iH1U4S*oH7B$4#lV35J(Ua*6%SUx%<_r2!?X+XpQ~5l=yL zJ`8IT(ibN&!A9NmsmP}wUMGbuQpCqa{=JRm*7R|c$q{>F8XLgT#Z}%!z_4Tz3!S} zGTq^{A?o;r@e<~oTDFp(6HZ+CS>q*B9hrZufj=@~(d}TwM|xPwW<)kUGxy;- zDCU9((CD{y=vhoe3L4jP1914f=M+@Nd4kk|GP?jViCe$zZ->{)(voneq-j0X4@^Cqu z<%0_CMdD4^AVchvP1x+H5uqLVz_-Uux>l#9js59oy&dMO4Ud-PmU9<;?xI}<4y2- z7~v$YEx}CmC{8Xg^0Lt#6}ks%nC@dnLe<%SmD$K_vFPwGE5BtA+&|$2Q@$vwu$nfD zsUmYX?d(q*eX{f>q*gj>C_W}Ztl;S;jxONe`L0T3d57D@IQ3>c(qLd(GJEOc^PD4+ z7moL_-*!o)?S!C(HHrG^Vl@O8PL9-7XWes)I^SJy7W-^dc6o89+ZYyM?ZCiQ_qN?S z`G7udVMOC{R9vWHU8AM?s~|b?5YM3`wInUH@$NW5X%LWX!n7MXL@h~Tx@DXyoUgd` z=lhU9VbZSg%(;?C;E367Z2;j@8e=)xgZr}Qomg}ym7w_bA}je1 zF75`q2=Y0=FQZCJ;AeYl$ID1PvJYA7H8z?36>%?_Wiv2sS8K7vJfP6FfN)<@))h&+ z^||i>(l)|dbNh7uVMwAX3DzVsU|J4aeQ%L=?cRFFppz_v$K_XgLD#$-M&tt=xyd?9 znlx{U`0Xy`d?hBM52DO< zAG6`)7xBcl+gAeyFvJoBcR1}=*mvqgT!i4#D->Y%bdY7AIc-tYLM$k&yMPQ|TIu*S zo97$_o;UEA1kSe(s#=dh!-AP!;89ht+d2|k|EeU|Ces=Z*8Xj6Fjm`p!5gk@8~-ZU z?bA|ImF={n1_z+^8{j(3s+pt98Spl)u`3hM(v{Z*%Xnv%t-Q#kfnJNpdbm4)(+M{# z=-E2F7Xu|rbeQFO97tpKOD>1k6O|4xIsOLcz{N705=TTRU)=nHtoI4OA-5jyDeI4uzY8UZ~UPx-3_W8&33Yox8n7eyGA`h-0 z|G*wAf|eJ^kl;0MFt4Yqc{godq}2)){QgMQeSgr{3&k!X@%#Rq%tQH+QzliW6$Uf+ z0-=EoQEMS!TEa*yZaUSRRTngbu)oVbOgg1xf=IJhc}B$zGX2hu($pQAq!CCA?m)=sXpVa z2&;iS&X9EnSBN<~adr6e)e1B#C^kUBkJNNN|?l{AXv`pKi z0GxnfiS%`vbs4z`nBB3aVGeqF(QX=0DmY05sS4=o=s3%!bF#!IQ0;V$C25N=f)llv&O3WS% zm%#Kj5^B6u=YNQ}GS0*&=L9_%EIh0Z*aF&WD&)xp&VJeSA|Cx0|G$6f zvp+~;aMy%=3zl;S_)RFhKzhGH<0Yy5ka2lSRr&kS!~K`gt@ovnj8hL`DUL+z9yN+x zSLRkF&uY6n2s&!^GYs9h`_CvH00+}tD@VZpS{44SmQSaWt28rhbj$TgG6@)dV@FKQ zVMyF?UVno|;Z)O=XhuBZ2hD>vTE@70+)d}Zf(j!2M)dt|O;gJl>>G4S z-MWZxEiN-f%RDJMKTebJHTlx_%PdstwEDi00jFdJ1Kk-fFszs7GbPO<)~&#yzEN?L zOcR(#kLm_0y@Cz_9|6ER@Qtd8qJ58k4fv)sN74Vfpez=e2y2Z06?l{DY1N7>|K8ze?G^j-SIwa*3(8p{MmwovfsT z+1k>*Wtl0;;X|jB{t}8`3FQINVScLa6`hs)P-c0{^a=K|H~^8TaofK%Qo>9r!%fZa z>oH=Kwjb7W_*rwN&5p{Yy0T}l*a`pXa;S`PSxr#C$|Cb7R%{?81n(`TWtlgx4aBo- z6I1THY;7hb93b9oQnJ$NYL&d$WeQ16T)m+imGM*wQLkJyY!>BLT%#J&)51K|{X2bM zD6eH$DD-U@LH>opQDqJ2=pVh9Rpsw5`>VF}ewY0t!C|}a)98h!j>vtBIRKvty7i0D 
zf>f$&#yNimjgu=!?O|3c!PxU(yqb->WWFb0@4yGoHvYR$MtY7CXRhHcC0CBQO%!pw zJHh-i2Mi3~Gv8(-6sEW~5nvcGvco@tXzA~837vMkk|XS8jc_E-2OSxeZXL4 z%y!y4!Kw^7O?-)aC=moEl=dZP*w@=?C@fKOb z^ScsYKI4q_AF4}g1`Ze}gPy4rlR^4Qa{g+>esv%s&gy%U-RhH${&4-VJ2A0&c$%;2 z+4C9-sSrcp1VXsk@tfO&^MIcxzq%{f$oZMLn7A^fpK{qHb6HYMnk)D&<3&m!;^g$r z+qjH*XXZKtsuR<@hOGbqRlw^P&)!#d+>}21T}-HJ^u#e6HX4}LI+zcg2@N6Cb*Xvj zuDtV$tm)G9y|L(C9SOJHd)~(al{f^sz5MrFQ+LrH%VbPTTwUmuJGGGFJ7VLd#Z#N( zD%Z%YjN`)Kje8UWJ!5_`c0VuElOFJKX*Ss5RdZUQUoh+lR*bs zjkm%ti%D~-S`HS5x+%CI-o^icPhfXdnBzMWMf2Uxw#Cn=W1$w5x3*1TNTg0YMs+Y{ z!R6*eU=bc&`H^64sTit3J~2KvAw}+8M=@(oa;!LT$R6R)xOcU|H%J~!3YrU%BT0D1 zEN{55vP;BmcQMeVWbfZF$}BziNh4YzfqF(uIhBlQ%YVb{t1UoESB`j%uc2!N(AL*v z>To~uG;h}Fo-{YXE7*?gV?$1=51zY3vlp_luRr`|AUNMH!VGu(3B?fxJ9LKig0&r( zC;eucRudFM)G=zRz_nKAMTSzamI%T3L^g#n*MY`Sz8mj8_Q=4KAMX@UPZsH(IPgH3 z#pmKP^JO9h?o|JtpNSRcO$s{5xX)Dt!v(LXm@MtMz5?HCMd8gRIloSAb)S)RLa%?wBQ#WKPF1vm4_Lf%T^1r`4m1^mfezBvf^t=3(|+ zZ@J#7gai>P(lGCdfP&jjQe_Bi5tKTrb*_Dw94;n+owb$q(ldO~xtq%a7gm6@a;3)1(|iQFyksbYJ#NyxMesdHe3$hU|L6>n}#4;O6+1HS3%ekS3r`+h8eK% zM==oyf^-1Oedw|kBF-07B58?-M-Vwy)w~*wOdH7mBkHY#qI}`1Y%u+!W9wd4XJ;rQ12@JphOmpYN2!r`7GG%#g#e5$_ zzl(xC1Hl!ZMsHY33VA3jZN(e>9KB;=Z2jAv8@F0Z&{>O2Z2?RyF?%gp&m|nqep|r-aNVJ69K7Ny z%^qkKs$&C#Z4UWSdG(at@)6=4p+vcixyE#@fwFpGXBPJk%|z| z4-VD~l0j|i0vms=;rz143c#u!IYDCa%{Bbn2Ltcd%uD6sbJ79m@GyQ`*y!g4aK;p8 zQ+Rr}3v(lnRV_n}VJ5oWVbT3fZ}CiD(PCXH&rJaP@LXXy3a?lvGvd; zHB^}Re?c3BK9v;_@a_(89pe4Im$*V=7#vI{eD#2;l^7P?0i%Ggb4^J zwZj01$x|cBvT%7`#J{X!+GX$6wPptRP{@0wi7W>A&4TlDeAfo{h)9qIHArqUKdnkW zw0U|;6F4-WeW}h5G_pSPVvI}JLethKDEFF-o0@Et_@iZ2c%NAU%DJBn+c~UB;yk#w zeKu5g51Rft=-z5od52zM6p79gPWT<051a6|gMd75#94JxrYVQ#a|yr(yJFf$KUGKu zX%W~q?pLZK5e~C8&|_K_JB?LDo~AD}V%gc07;}+z_D4RSoOooh2=mtfVJK4a%uBr! z1$0<&0zal$Yu)(SdxV`D6s9L=#Lg9_&A?!u-g>1QV8>ZN7TtsyIYgPP!M zEAXFNwJs@B78g7C&av2Cmu%3~t85MLH3ilG28C7#fb%f&0<`k$3`U%C!#N{XLsk*M~uKpwW4>buzUP~756iit;m%|TjgJE z4;tN?;}u^I$UCh|QgS_nC9qx@k3Vf^2?>;X_*tN8sT-3G)(3bQAUMA?P5P!o3%QJo z0)#;*KsX3MYn;X$`|P^XN;;;iFo4&i9QZr!{j4TKKyTf!={rPjZ{sfc3^$tVvhY>@bk>ukHJ0{k< zgx!Vys-)1uu3z2Cp1hpHScB2NAF15w$v>F_r*FZQ-ZlxD0#= z49wsogCf?@Pet=79Hc5f6;Ebn=h1C^YTeu{`ykMCF#RSjxolebhMl!wt&BYewxgT0$L0Lw=Al z^uN^27IS)a7JJn4eBN0R=juPjE6Ebs8TFO-zoBV*!Z@Z<-Xs#WbKM=}+?#CBx-V)` z@m$^I zv%JP6UK#BV_B`KxS#)rAd9kJ?S+CS(5+Hm-QL_NS9dZb8;6QJw<#l8^;GeZg3@bu= z1$wP!1QJnRp_k9t&K*j96I>T_flh9!xPY*1Wkems;xTxD*fsh{`?b8V32ozap8k_( zR0M8k$8APn`LeI#TDe2ax>40hzRo_?6MQUc#Pl)W(DaS0y4dFCGKbN`>vT=l4zeBr z84nfmsgIgd9+Jx!$#HuJlsLg*;SOY)4?2MZlZoa>gp!cu&Ux%@L9@RftHkPvNllSC zLm^Tjf22H^5ilF38lu@-oR6+&dMU>L7#<>REgip+eA0TA?{#-Q>kKcuu$lRbw{SZ_ zfsT#mFvGE6)PzG4Hs2`E7)o`5A=x!6CmPHP52NJ$8y^3op(ax`CFje0%V-Xf3kqoZ z1JX(ASUp>#53jhtuJC!(c^LJY@e;i?2=C;U{%0en69!w?2nd6>H9+wEH^{Fro6upK z7tPbZ*k>K?1?Ax|hmuTZt7$-yv6G(IU;)^`h?7dzC)0JNvhN zyN4u`f6Q$6`se(8{029lteHmT{8|84QlRdTsW)k^b^mPq7tV=E zE`mPB(imKDg1mXHr{86|7Kty)@Lo=7e-?DMYz25+hA>|@p4yyWD?xUYdHmoz9|-6; zv9Pu`s>VaAlEa=uqAS9VW|C~_H^&56up&Q%jpW=#$#GItUeDZa%rcmgd~H^>J4f5s*|ninv0jjS=mj4miPI{d1>q^4rK&Bb12#*|hJEUexOh!sqlc`HEwchUWQCdQuC)!iAY{dExp!>17fQslaF|g4;bgp zWC2_*w1|s#In-{)mi=HC@#AH@`@2jj8X@Kx%UGZxm_nIY6xF_#BUrZd!S z`1R8rHF7GJUr4O>LS6~`K6-&W1$SHaDC`4dce2j}kNL0o{Cszzp)C2v)GckbZBk9<&fiSP;G?+~$Y&v3RDf0QM(3^ZHCV8;K<}^D;4lXn& z-@NEn5)M1%?!pQtvskn;o74ccgh0jQ#3oGr!6)|7V8J+<-SQ zuCrETqF$+(NO{-ZnCM6cScW=0DU=)_g^ZY$w! 
z3^$A|6Q!KrR0O`B>J`8S+$8nAF5f|+*c$a$gwz~->3e8ChXuEQbs35vj>XI-^!0_J zJ@oJ0^n$WeY?8V90D~^J)5{~?B3{39`y`{@IeY(-HQMt{osEYW|Kt!1$3BMg-a`4< zBJ&lO13Y8Ce*#hO+}l$5U~WoM4Z!QfVsk~(qs2t__6xKqHMuvoa}$AJHORLQtK46r zcAdlvV_%Ni#IpkDHOIwz7!O<{%RzC zb1jCo5?{8o{Frql`p02bP-JMJ)hoi-aQ?Ah|25%f9|H`?JS2yOkPY{%=oecOAa0l5 zV@Tg~*lCbo&#e%}h1)MNE4fuW_i^;gyC1U=V$-qec#@ExzB0>9&?B{(zPIW<$R6;a zRKSGDM#;-{{Ebb8IilDS)LUW9-{ln^?NQc8B6f^Lz>xTmZ0Wgr!VpWtWOQA`_$7_0 zq_EmvLd>dowKo=h;*TJDG;kOG0Q6@FCoMhY`n!Bp+h@Cq{I?(zhoFs1$Stw>-9}Mm z);NWJ;;3*=$EL@sTQ`YuUNI@}=XySLHt$=oLFiuRExX^3YZTLaJf9 z=ImfRj_mGRW-u?ySmRcVeA>oB@u3^0yrzHu9hqpj9P+;s)4&lBFGLnMd$>4?`g-T4 zz-0G%Uk?ZG!+YdbTW?1RuTJMf%&x7+Zf{b;k1av)Vg35`2<}ve&JTsAM2P)L4)lTN z>F1Ejqyb|o_Lu>yPJx6g;%AaWtfO1!jbh$B63Ew?ukIREpk$wk5X?(pXs3%+04Xno zdLoE-&F-)ir0ZJ)Z1nzRJkRPQ(2au+@Xp)e6wfe6XE$Ec#Z)b_9tvZ(IV?T_MSGGt z-4^dRjc3pieZY~_yIrBjFi0jP*^!PMa9ESZ9rg3t)5di#UdhqU8V2TZggl1n6$yzW zuhvf_m=$V3ac~FGU-naK4vM6Q(&0U^H+9(pRaC{C6!q=Tll*wCW)E7<&m`)!Q6 zIcZer>)XGb{Wd2U6brk~4G`bDNx;-`>ZAX96*K)h(ETADraOeg$-Zf7aP^Xd4E)kj zSMZ8>R-{KoP0AHI;NK?rJX2%&RLt6(djv;g*k_WlR8i!n-t%GOE$;Ts`AleS#uv+! zu$~!B3!BMvB~bd>0QsU?xugL|Z{}SU$7jceFrVp2cG6Z6-^PcK&PIg|AoRw2CbbI9 z+kA%-FpMLKYr$dRdG`wU2y?UhC$`gK_09#4|H5!;^YU}tIE$>!GMle1jTHH}n3{J>Wy$fOHbe(T9Y=-MD>6tQ9w_L3OuU^a7A4x_CryY~c zg-3rE49r$&MMlb0 zK6UkA;}|@@o*AF|#?b{Ti$~dqYKnMf+24D*)H$}h34K)CL;$0O={TTg0}W%kzl!_h z{#9Q8LL*dt$;e`+gH+vbCQa0LUy}kGB%w9NbVenH_j0aQE+2H-#37drvmNKlqxsFM za1wOmEnmm&Y54V^enVn&MRQ;Kbg%IO8hnw|zPbJcm4$M*`AkD3hfhD2?3tg-ZgFod zkVIe2Gx-5u$h>DoGrOiWq|TQ=2+_4~9lTD<2Y z$63!x6Qq90k}x@jE@_)>5HE}TiPTFR>N%n2Tc0dwnk-=X#rjgq4lt1NR-0|gHT#{$*Mb0bv|b{j_*sy@ z3d3;yN+GXAAdV_Oo@29#>kv^nD)Zfh^(mjh6m%<#PV4A{euu?erhn5bYxZ^1F)!Bj z$2&Z~g}9Bl!;TXbo5nuS#_tk(nT(NZKXv-;Y-PoYjIuFWcnL)vSmm2OS4)|6~htf7fLnwBy_`@WmOrzMLl*>W2b_=cGD-*eIPx z^OUUoRW8j;RVe@8P!d*hLYaYvJR>GZt|@jJ4;DqBA)e$S>ol4Ms*do+rqTe@F9^w7 z^eaKE4pqA4yNtTQDrIZGP4uDqik)t#ZMh)Z8Cx z_PnZfvzn_NtZ>gY&NTBg$h0mMJznfSOS9e?{R81~O!Mo1KwmO>{+LyQYz2z&)=*Hp zs+h>5v7j?uraPw6h*dFs?;}EX@OWNYh0;x}1oY^!h-mpbL}q(~_7$nJ=ItZ1n?85f zci;3ziW#7JLHXIp@gBEO+@k@mAWIZ;)Y}(?U$py~d`FF3krcl<~NLCaK)NGaN4wL=AdC++;f+FeT>JM%>l&lw0q##5MJ(Clw`%wmvt z=VI&qM!x_wRkyhf4FvR!;~i)Lk9j+c zG52z7>05nsc)u0TH%U2fItGL{MA%C>4CL(92+q(9ODymAPnGTo)FL9!sgPq8eUazu zgjK4d594DXl_G4(uF@u1?*R2&1^d=S-eH)h%X>uVA|O2ol@NDSHrPn@jS z#+7;rj|AToxL8(~KElztgf+jUe;T@BT~vC|97nS=*sQ*g>9Z4$LE0*~vv_>?lPF$P zmoR(#m@Ogt*D9olDwtb<4JZKC$bbfim)vACYY)B51N;EDI=R`HmDw?l8aht>#6TCnZ>j6lc1=wc^GFo}2pck~A8R zGIr(DKeppzlyq@Lk030}`59x;6x6&jMI4MfnqscxQE=a@&AUz|Qw6 zIKvxyLxb=9l=%P25i@B~8!z6wUxP;sq>?PmED5I7Rg19EUgs$IpXNm~@wuRIbwxcs zSMlaj8{pxAc?6oYNe7!nMFRwZjMknBzPW)4W z%sh8x@HZ~A$%R^itpwo= zx{>J~X<1^6ZT?YJres|(&phe%s>Y$SpO(JB{4p~{7yA%<9wk>tf0b?#EoW8jk~tK+ zx?w%H;gR>JK0z;IPkHPv(KJD0iDdyRIF%L5yz$%fD=)94omGT^!YL118X;+;q8%5H zHdEsk`qU61pOEn(S15tYtTzIWR8@8!f<0i@>+cFrncJ0TX?vD`nGbUc>#6y$XLA2g zltL$QC@0=Q`lI5+{f78n_sI1YjXK!t(?0<}uI^NNG4eUKii=g*8NIU=ttxGBHhB0- zL7pAU(ci#{Nr~qyH|=a@14|vV@agk|f1Yk+IW>gO9DCn9CLm?fhUatYS`{~0*oR$# zx!IGZGc(cqJMN1axO*KYpP5l~D|j5j2SL+o@CwK7HZ4snURh{#CBhWy!J!tp?rLc& za)z{I@H=Z|lt8VT+A@z(H>==rH@KeZ&@$gd=^_b25q?(4yd%Sd;{*8YjLqz%%bsHbfGar zm#t-dGn!O2m?HsAXSD~adiyHEO<%7{DhRskcUXJw$!0H(;GUSGH?UI)%u@*gw2K&X z@c{Y&vhIbK_5SXKgz~4HfuYWq?&8!wja8U#Fb7@Tv{@jz=h!Qe&)A}#AuiI(kne40 zxS5IW@3Gq#gvb(@c476U+h(6s6+s^8^2f7vf5jWXn*&0gQ3&f@RW?&C9 z8LX$IusgQ(Iim>Q^Eq^Ia2Uu;O-}x{X|~^mI)}Ib6agHj;ocX7c7HhsJag@Y7asu$ z>@=KtP-My;=R& z^Lyh?wlo61<9*;B(l9Mc-BT~M>6@u#6i+8%8K2%fZ3>qch9=o#x1>bRcFRwkaVUk_ zZ*#a0FA;;IUo(TXN>nK2m|a9^M*&T?o7jhe5I7OXa7s#o7fNVmm>_ zC`!IQb4bMaw44d@J)>;~Hz>!Ih%nKe9L$t?|KQ(48r_ao)goe< 
zOI8b%1j8?6`(*w8g(F-VuAI{q-|4A*2(PaH-j{wKR_fk{l1?H(?l&y)<^Ws35DS$A zlWkms%d_zk*4A-a)=twDu@!mLRF;$c#{t`K;e+*)OUc|kv7|`ZSwU%$YiAr0ZpsNh zJhMzLrO-OLxU(`~&llM#_6ONWnmmVf3z1~-)z4sG7@W#t-&_h=;qi9)T^e4&h^Rex zLXaHO7ha9)8H>vF`S$r}W!=85_@!DPOrBU@yGK5Et8pW*iD5-gEWvS`qRX$e#8B$j z!-!@8o)sx`Sq~NmqtoG4b>iqz8MN|>uaCX}-fTcNTu0cmO^v#Ow2K1 z3e76KG9irgrMidxiKT;Icg;2TV8+n3FCR2MLT9prfE#T~-N(&xQz7C_x+#+o-N+vD z?ZDpSu!_c&4jFjkoyqY~4SN#bL`|@s@Sq6Z^@A(X-i}kvJ#xdi>=o5c_foLutbp<+PqfxPi-7&E5fvL*0D${$cG)=9G`+7y% z9Pd{f{~-1Z6FH{B#KnaclhS56yGjtsfJ{mG2jdEXM=M&dQM;Y)wyleB8LW7h+(xB_ zF~)Cp!2!m3?3*LTX>E1GeXGaqYee~D6D{P zVyOPB-;qyS^$kYkn~15wkE!YLZrLX^Y;>N+W$LCjAREY6VdvYXHX^aM)_)MvJ!f(o z{tz>t6S4KZK18jxHd(NcLYHCbOFkg%2t}+AZaGYGR#)9^Wq>^L`j0)|f3^$IZty@g zDtt=`4SzJ)|8%Y=6^aP#B9L|rE}4$M=FC&M9%#_vU5b8<)@Tpnmq@Y$WLnlff{qSp*33iL6p zignf8?*9l?$KFrlx2~2|@c{)NsV50!o_6x6WISyQZ}vn{+1R(R^Vo6fX!P;%(67sD zUkS+d;d5}e(zkO++tT9=zZo73W`er!b{N(gPIPU{@#D$CVH|gx6NXN1@Xe(2+^VFU zqoHJx*>Ljs0=NjOKJZfa6KY!4?ZbJ}`JaUsH4oeT2jsg_R3PrW*n)MG)J)Bm^j4a& zr}uJ0hRVh<+Nzya?q9oJ4rm7bxm-Vq`KJCbDAg_9OHRq$ZcU?UGKNiP3L3G*<8;3Y znQbKc@NWJWdjEoprYarUex)<^PNw{W9&-z}9I{`(usI?TK*1Eax}Qpq+(R8EXlFo2r%81ikTn}Et9!qX`&A46*CNoH<50DACyJb|J05g&`oR>s@s{G z&h$Ln#7r`o=_PFv4$&n=%CG#gnEJwLg@PL4_HB0)2rSxr_2 z5+JS%XLLo~`PyXU5^puPziEVsax%qEfy3U+x%}J$(((uJeewdk-TexRPlmSO{j9 z(6xHMKP7@N>iuxo{+jV%9&xqJ@`ks~^m%FSfU!4(*mkZo+;XfooT&+AsOxzNnzi)~ z4Xsn@BaUL0wt)G<4z1eur+v!ub(no@8|0=pSU3bmJ3FKhVZ~0sy@!F({eu8e;B;wG zq*lRcz1*g~+$yiQNm8>wfquQj#^wU=k}FPSxXMu@CL11+*pd{MdTV_C>8HeQc3u4j zr%t&Amoc8N?=_xR(ZZb5X>_>x$vBEG%A#9j6?jZV?xifi?; zilPgeXec&GOtUHi9o`cbewMnn1pgVr31gv+OiU!b6aFVTNPz*2#zJd7=6t&B*MA%x zOk`g(o9hs4kB3Afo>#g(Tud$4iGM-1kfsET{dz4s?|a|vt3d&F+*6%6_nSG z#AkWGz{fY7qH=3)yJIA>Qeyh&SBZH&U#2fwB~8%!boKO@F({ssx2H)fu?Qf;=hr)S zSDGB|?NVd4Gi>umuN@rU%S0I#_+}#~j{w_E(QX-jjGJrlrG{1|pSOTIAj!0$?-EwZnKJF9#j(iGTAIMfJ!LU%5Rv z9fcjCOD^haeC@904UHh*+3XR)cKg^TsckEqd3!=e`3L!&$E#&b-AkknLT+iS1 z=wNLLo33N>%42;60mqO>Tvt^XwBQlYwN!o@n-Z4mtp!lgj@IlI)8D5mlLEn)kOKWr zm3w6_=v!n(<|U|)-WaBvi5g#oMPPZzddm9u)cCuy|Bj&HQH7mywYAi>$OPYL?GI~M zi_SOomZ0EDoYAv~P8kC9DQPkx57oQtj6br3YYQx(k1BpVX3K}Oe#u}0`xOuDcg$~V zgveqS)tA3EV*N>*D{ZzaoB#>%CU3Z)$T}M%3i*+o}qv;P~qxVl!i6DeLJgLo>Aav{eNf*v$MV)^U4GJO zz7Al&STiMacI1G(S$Z@I?IAAKJ0QkHkKOFmz32o)%G`W z{zKg^(78;%^7gh6!*8;rx_%TKEm6OODT(3KZFM-|Z0m~RE8CQlqQ7JSn=1st4*JTA zDuHJC17SUV9G;F0%9MtlQ>HW_2vmsf~l`I|hK@vQyoVyeWk&$JD&0 z7zdU>FpkTqxl?!;m6Gitp=7Q|>J%T|1y0o!tXrn z9Q88Oj1kY!5#VC)tA1M2S5`KnfN_**KcA3*4^`pq)O_52NV`QLF2_(>ByH%s5*V7~ zCUy5wQ>pd+^$E{$pYWkej3wR*S1Uo#f2wHG?-l(XW(zlG^uElOTnb71t@$#@pM?VI z-qmesP)twdC)@~j0E~f#30~XVP zA+o*E-_#weN1C%7A|3>v#6q+i%u>XA{E+M;hBi5wi`zZvHdlHxRwn)|B3bG<{fk1z zryr9t&1Wvqak<=pYqRCTQ@p7T6g)=`G(Cw6MdrM`Pn{uVXj_6B#llecE*w$;y(z|`^d^5X2V|9)m zAEyKF#hWmk`ZRjNS=X=TYMMjqoXj?e!cN-_|F@zVO``|U;Q5M}Fxs^9Pny6#UcB{7 zvYxn%6kCCB6PYEBUr)|lcW7wF0;5?M;_%5h-`PqhJ&h$SRP(FsC$@EOc)@>C>{93$ zKiIoAo#bTBazb8#U#G+=9r$RwfXe_a*GlMNy4bomIt#7xay>E7$m%d-V(C_} z1GRMZpx@4~yvLIiL4};9t(Xm_tPDNVDT7jCK^qxtGl1*Jvb1rZOQwWHpC-{>*FJJ< z+dkaQm0nKzcRXt;a+4*kdS*>W9=;I&p2PxnDlmz0`{poG(j6?R*9w8&UI(S}lvnWd8`g{*f-F77?&~NDD3fr=PHi)PVjWIWT|y+ z@%~}XW^!QW@V`FS(9lrJinSX-n+D8OlKIu}HIOIIH~&QUcRXSWzLZyoMyO#df$JtR zjaS(Y&w%~RM{kvXkQCR^&uTg>g!2UQvmG}2-@&Dig4|~Ym=n)`bP@=nU38z z3uptP_@`~tNgzDc+O*DcC=q4t$auQRs71boN%}9{f-H*6xzWpV*;;yrQbJ{$UCFwFo%AtYnbM6Ye9&;Z zxaC%-vm@Gj9k0Wbt6>Fv3be4y_TlyYK*PkgKosnCk~Q~I<&c{B9O(1N10{pm8|WuR znUWvUaXQ#qw>hf%^`xNDl5AIH>}X!C(LD87^;|ut1rxo%GnioaHyJ|<<4L$4AlCF> z(y1Nrn^`=uXOaKs;tTAa{w@r?VNX-E?8M=7j7?8r|5k>U8xM*Ov0XS>D^*)#p4QV( z=jdGCMH+)?!D*hb)1Tof*plgtP_k^s13I}5+nt71&Co5sw+IW%yv|BMR-3!H96-gw 
zHdVxR@syYOt&*5$R-ck5ys-pF5|WE9l*Vb;L?LPty!R%tj{h1TiIDVCkG{s}`%4y= zfEpIwPTiq3e0Xc?TUsZNJxqCABZ5ld6{x$Yv_gHi3A`s@?5^Q6ya!p~`;WFvlu2t} ztANFTdUMuKHbbeY<*laXR3d2E!Ai1TmUuZ-s=<5Cr4BusUEP<-u39T_4!uZ(ql&}s z=6mXI4~j}+ws)IBjC~97!k&ZWSiwupvg+cwmS1>X1pSUsvY$r9FQP0u4OZQAtaEOj z8*4fzuL5p!5tbQV#qJZKHY2k&5H7r(rD6F|+APAFBF~5YQ;I&E7gNheK|StQMtSwR{RHm$Kmw;66VcCb!XEtxY0Dg`ph&3NZg%sHKJ zn3{JIb~04BM5US=K;T?a`+G!e{i4BLEgr=>@_oWPbfuB>v8%9(7M>YYBuat%rD;WX zSjKMOSK^FFSm)^V9$OdrXs_qo|KH&NwSEGnkcjSyujrIle8k#zzS+h|B&B(A6+NcMUYIe)0s08U0?Bz9Z+<|OEBG1IDS&xgjk71!84ov9&wN}@P&;$1|FC; zfg-qxp>*FRt+!C+FOC5U6b+23{r?D$LP7q zP*K2qmM226IlBtLOehHW7wrY$Y|-K`Qpj+?fY;tCLOxLBa(YSs&#Cp@vDmtzNiFWq zzsc7Oo%QT=mDstskBcj-pEyBa+9Q&v`D{d34+j{JYEwy*B0Mz&nC&dm|t2FOm+o zDm_T@nTiSS2QxoL9#+~F2-TtjXpa(qx*iGcKkkHU)*4Fq`0jXkqLV~<{yeS7=(ALB zo)okle%aO&q@!tCl9ZCCNN(62O#F30XO(&mrp?{8>-@8`k|+h;+SQ)5nN1w#@C&Eb zQ1=p*lEguV3Zd%i{O0@HAECKHod^|-QWsEOz zXaHHjVcG?@hGIuCu9rN~gql_=$}s~-N_?u9V~3rBkZAVC!-X{4m|ta2UM*tl1@HtD zQE(|pltz82(C+D_y0YqSgc=pm{oL13@zzlaDXkih#i8baTP>>h$+ueF6cH4u3&Pb! zdv6_&qdeJY^}pPg|3k5tj^SeK`P15WS`b>S2#EAz1vjdi5DK8p!;&P#~CPRK5h$qPT|HxVJ}m?{e>4juMpaQuqET5tGFM z`+^>Q=f)KXw%b48)NpLZrOjQ~qW*1J|4C7z>C{(_?DV?WUGNZ&W}i4QleSV)vtS0; zW4V$EgpeIWb8 zB(GZ4D=cCQ>Q6MX$!g5|0p2@*rLGp|LeXf|wRBD3Vp^?>EH(POQ@3QkBib>5iNgM( zg}WmrCE%dg3~UF~X;W~s&jXW$N-m`0ZoOWiiXZ|kq=y_x;{Bh`t{>ewJN z$FrWyw62mCTac zWRvZ8(-Jo^p2aBWGxl@cw0QoU)wCg5H~MJ%XRMrVA)>YVdz9AbKdlGnH65vRgzw06W{l;Dx=}8znlM99lS@ZDa}?)559ydFK}!{b+VOEo)hN- zx3Q*EUiXA@{H--?hj8|RzC4}hiE=~9+{pR(_?UGY&vWsU_O8NuC{sv*AeF^n0yfSq zbV3ir`b_p3x+j2YtgEeQ-~`2#Sfvwja$IhpXd}#>pRnK<|3v`KlGm9$ipkJLwst=q zF0A=oHFQTG2_ecIQ2ASGS1~m3&HwnuL8*3Ri4->C+U1`#20A|02Ccx1d(CP&>Z@lG!6_240K|dBM5eP`M zvkxE?6~*L;rC!Ef?tHtg#3EbnqAq+@L3k8@LJYl`4k0K#?2zHL=`rB79YtNyb(e-h z(3>3MN1t+wY)e$TX=4M&%!jL8(}!I0wld#eE7Z`2m{|$2I^_0n5vv>Gk!!&TX~|=- zpO@jG1-1Hf_mM{rIonX#pb+KUC|o;lw)~+_U(-39v2a{l&o;IjH6`GEwI|z{MHP(d zL7{@n@lDGKmRjprISnkRuGt_)D{3f|UmM2p?~QlwQtMO8-2zIOgnMwFi-S~#J=kh3 zle5UX9dm?n9}x1qx4U%1Q+{E0;Eb znQNb!zin1&lzuS9OE(WLthn5!|LC1=HVa-kt0(Fw{^S{X+ zzC=nlyb`}ys3{M>w`2j*f+<=XtQyjc-#f+1lDG}6!fStq!+ZLUY`%HAO#C&I>*psU z(+3Zu*49%@Pj1t256qrDW@sjM;1#BG_5{lx5#iiM@+P*dhBNGYL@w`y&tYvZ=KbU; z-uzqU7VspE08FJD>w&y{*cGpEpJ}ds#K8DeNOg^lt`f3K$zgk#Je{s;RphMrSI=A3 zH~p7k`t&Hvqw;|r(th_1OyH_}(f&lM9{SoQD|~IyasT>R+p;BLU5!oh?nT&0c1f|5 z(tSjRz((zEDr)btECyY>1p-!+jJtWTXlcNbljc(Z+R&GO!yb}RSpB;tvC+A+s|F(f zj<*P==cHM+TEVMxdRs42^Fy7r#>*9@*PR6&78uAWP;lb%8{zsT6iwgOSt)fXnU@@a z%V;yT4M}AD;q#07G^Fun#NN!(=E5#CHLO+Te`-geWz2tH4FS=>kqIs9uI7k43T=M@ z>G`#LCrdOR6fl75mgV;A`Igvv>2L<)5Hndj;Vi9g>NuKR86bNkg zaH=_#Bx`&HTXm6M9~Qa$i#;00jdS?WX*tk!|97Cm2Bp9SMfcBpz(*)i1$-tLd|~)E zD!?IG(fKzL7YbLa#rB7t{3vivGm@R8jwp&lS&ScPr)q-d{42xJ(`2fvGQ5P`@jC1+ z-2$tLB>klP@!yhjP}-bU=i1kWXLT0+)O8LAJR~f86Gg1@E@V5s*Z1=?jtQv!*UT?a zffng$CjArPaX=7|qnOsR+~}nCL7*7XJ|HY)?kDgkQd6@&=G4*B!sECVMOg<#7PNl& zf3j0IH++<0CqDdQ!M`UJ2g>H6Cs*R>a_C58`lU2)R$JKP!Y&DXQFycuEDgtIRunp^ zM+FnVR|X#t`70$n8=Y_Pt_uy1h#J3BznHHuhB zsT-Ed+u{;OYS4C#y{)2ONF4yocXr*mLT4JLd>=nm!A@(6rUu2d3oK`<7?&m{#>w+< zQfIUM$~Dq6zEF@6tX1t=o}9e}*4=pGVd{w-Hh$%xQ=K{r=&AGXS(lG9`ibwJVBk71 zI$ct+2Rc=jwn^|ar9*2A{1Av>Kp*6QyCHa7*n;nq=(Pb++Ak!{tdBpnT_RMw-m)HW zMPsIlnsgX|%Tf?Skr(l4x`HkZX(&IMP;m%LC!?TwR0@m6VE zc-+5@sFmj7dQt>>fsJq=l3qveuNhuI@tZ8kIXR_FNK^rmw#$h^-udIMVIV<9%_vX7(|_Hd+}N-1iblQ|a@IHr62 zee^tdfZ}PS=Y(O(g@u=x60)^2-w(;+h?#=oqgoLj6GM#PHe>N1BmzRIo|M}4G{`tR0 z6oaUD^U9GIR@oNujUi*Qx#b>m-*EKLP+dve(ttgaIqCo)TvwjW(YkVxXXD=kT6~dB ztNUIxcDYOB@`gt%m?F{dqV{lR&4rk-9_!Z)fK=fQRY~MG+Clq%a=xOn;fhF`Fttg; zIzEJqU|*&NJQ@>&&C~TrQN+tNj~0zi#c|ZPw1g|kGX9=>dCy7nyRU#}ME~~KUA%;& 
zp&zvTA0OMY4?lJajU3}1(?>`938v~yo#PLF3;ci<`0P_neyT00kosp@a=KLsxBy*r5_!DY$schV8a*_W?I=$|l0zQtbsw z22SM!mn-U59}^@>JyxRylI!YkPMkX4fxF#w#4CU>i`Cb)aCby%yPMW;6U3V9Hu`Zo zL4=fmdrMK5djmF}P@la}F!OaSY5}f>3X~l!Fm;gBm9J`n;hM&OySMLAy0*D{vH=Oz z-H6^gzEm*pS6_XpF~?_iw6MgE9y#iMW^S;b0M^OVCRlVIK~_>Y_Edf`mN%DoNSl^+ zN`;y4RS9C|{bF=|tYGQ^gLU-hfPVMb7HAv57yx_Qrk(EHP>k-N#oKTJ(%})cs(#q? zR8_SVD_y6n&o~&2(_^O0oMMxvPjIK4kgmf5bH7gzIjmQ>-4O6bzHMH&)i$r+;xPleuK;zzCDOSh!jdAwodHBF#sU@%Y=o62< z5PX>Wd*Km1-DjBW>uo-}Cv*J2c%&BagUFB0yxw}oiWMu|JT7Y=tW~j=!CDoNo+yYHUn2KI1PHi&!GZy|o0^v^ZIj`*GqW^I$xWdjJFjHUlN|z3&1d z9}^e@fDed_x-$OI?2bda>AKs0VQvr{$?FqqOR|St?GuAtZTu&rTensS_RaU&U zDtrb1TD5AG%QO5MD4sgbzi!yH^6bZ$8b649|6HdWGrpt0vO{(D4sDh>CU^~S445)b zU>fij3Z9$}gR&>k>xrsUf{i6Mr#0y@g2ehoMO)9{Ag5qk7eq$w64hd$P!vLeY};w$ z4RgGxwH_BhdFK2D2MSIeHPU8|87rWdU zO3lu;wNkS!Iex;~8)z6{ySEP)1uzT{Sfl*Cs&ONb`J9OpY?L5B)R)QU8ucg6>hJi% zhTVG{bPP-$toZ=obiicPWq~Vs7Qo{r^&?O*9Viy7u^{p_0gxx)4`P{TP&40i&@4fcU_EeQkjHZKyE==szfkj9N40uSV~(L7M~C?%3(p0)hGQ zjZl5^PRvOF@baDo#U&2pq`Uw?qktPOj)^h`^a{)i`0LRoVEpsnVdAvSK z(<~k|JP3%)#s$hs@v-Al)88p5JIy_ldc3#N7Nh4e)}bB3U)hjQAqe{`{U3GlS>wmq z%<Q9=5C5cxrKsQ&BxAaeb7`NrR}tDMLV%#9m2I>((nd9s6CJE`Jt*Y3fU z55MtY{tFsq0VWi7suRTS{~Yaj07eELMgf4G+U&+=0)*Yr-Vuzx51;}f0>b|8cfWIu z6$-FSVD!_j5eBvoGtfZuJSK70tXbn?oOVMIQJn?)WWwa%JuvaZWE7K74BAjb;z+Q0 z|37=@{hrlvr~R)By%$jhs33Y3z4zXmd+g0Ro4vBx>@V5>BiHUGyKiEHGp} z#fA;)X)s^k`&*vuq))`fihkkviPH{f{?pIr*+2cmJUmCqvc}W|luf80 z$CVaN#Q?osJ12TNg9r5TWPqmMeD_m(>)j=Sm%CVB0nC8Ry#(i^VrvYqQ_6fmj+Aya)I3uGg!0rt}$2cVdi&6$AKwQ^A%Qyz(N)<0i7H8t& zL;AXVG7AX=fbEs5Hro+Fu7J1vHiCVhe)=hUTEOXr3zroy`Qj0219s;hEA;w&Ztg^z zAwU&}Tq3~ux{W&=V8!hhU=`X#scvmH9VLj)3121(kP{8*nXcj1RxLW*TVtcL_xJUJ`?sGNLM{A7TBET zurrW(Pifmr;CWdP8Q0_Wn|9gWed%^VzyN^4jA^57mH=EPcH|o{diC0^r5ecPjrQzr z)M}SFuYU9GcmCZ{komgmqV$0$@`6(~j>uCd4j0(0$*kJlvd=f!$BWlEfR7tDpf9g> zW@A5cLSIPDGbLDT`sDbv%i_;xYCmr&^le@=` zpR$dccY9s&L@afLrg6=i>IvDWv9DfQnW{4^B^ zAo5SMUaM-Uj_HBUZE~pv&;<%+GiUsyF*bh6IL+6dxh>oL?K_Qo&z_OfkQ{pV%IWvC zTyRfm4#@pAu4SBH-r7Fq=YD*q_^hY-tlWa?X;TU~b%?;>h8p|Ut6%ySGaeqo<#^lX zodS)IxC1;e5oyKAJ?p#gzAc^SoDk`C)kqyyAkbh_AzwMp1WYjD7?-SiOBfWiIsz*1YEQ!)1`8xF}idH0UJ zb|fvsvNE!Koe}uBPv2ewZ97_0N@Cfsczzg<9LqgsTQ}kEo2J*^U-M1R(y}%8c^`SY zAos3f<%{`T^SI^~->$6<94cr^`C{D8916GREpjFXR=%N)Qyhr_v2hjtF=5^3>+O?| z7W=#hxVTH#P7Y-!BzN%qP(RxRLGw_|DLou}UYo>h?6i=s4fH@Mb3LDP=uH%8i`LXH zV=A1A8BytPR)ms&Cc`cu))%={DNT zYNL5OPp1Mt1FtXt>~lSMmge=rTnZOw0Cn00pm<$9bQ?nDD-b~BN27VPufSdvPPDA! zvVO~66^0T($-I3rgzBTfQ9&}Fe)_4ywYd7S4nIK~<3#S6cM9w=;rfds@RB7~Vv`(2TfPpx7?p)skN=QiXyz$1+^?GCG0l7D_ zU&QgVX3g?g_FfEp4FHY9@iw`@;dV?Jd8=vk)=~08StEUbkO7mg3XTAnCQ>FqWcEbZ zPuwG?LmZ~b@9VF>?)kfu?%{WjUIFTjddU|^NZV%bm;Q~tD&Bs}Ue%NEM%hQNbU4G0 zQ6PZG6;}^;U!X?ZfC0t;BAe^2C5<)i2L=OvJ)?V9iTY{!VD>qHJKarJP%zLOFkM*? 
z8DJEUm9&81fI%ZiWWdey>ihXOkm0b@R5=(Av^W8#fS$~$2qy#)ekKD#z`8Fruez%K zr;X;FZImwX+%P@3!hPvu^+P~xxNLUNJ!fFA-ZrpzFNd59bidpxpmEjaEy_b(o3H+A zguYi#_w%|cUlahXfl7BAJYbu4?y_cj+siAnX1hx-`H6KzPS?2Lklbqza=#M<*`j$f zdvW>pzJR&wPOdp-A@m7;?lhE^Of$%OW6T2fexGMd}Z&c zQQo3Y+(U8O9V)$WCDYc`R9Q4LDmNZ~< z02GPa_wTojdYeO`=8Ru|^R?GcoW>16i++sCMm=Eob5`jBBm%C_6+bdS91Zmxt80{p zvY)v2(q;-YR#~t6I}j^+@UStUViRdX+heSPb1|;RoHHd+&c(`qug+ckyg<39H1>!< zWME9%9WK?>A#IN`63=UFR3s3b^#%Wztqe)+YcI?2uX9q;ZIW?xi(f+j*1881$kVr&;A#3Bpj6rf zu>?3?+9#_oLig?+Q`}X(rP?fkwBB31*cPt*+}nE}JuuZHrIYpOl;Z6y$`_v(w8pk& z&u(`xrfmYNkJLj})5NmTreXv2PdSh4LH<>nH`!|SFSu(jm^{U=c{JutUjXa4A#dKj z%Ryr5lk1%R6@BSfXhRn@Uno}FL=Vc6thSTdt-EUzT~9a}FI>6Op>bTyc|ZUsdGy8k znYc@u(I1@Ex)uBHT#tavXh)ujXLQZdUfaoyx|?8NT$6#+qqY-U&hmbyTzd|s2`c9S zf#a5Y?6^by^CnOBa`8RxC6P`5(zw6_UE{`2pV?A#;>Z!12NMAC+3)8*fZjt>`ziMI zW6%lM8V7i;Pn_7f7U!wonKW`_dD#rVFQY&Jk;@2D(SK&BBj+_fJPFJs7kAK^GiUnK z#Y}pbScmX<6etTKQ$H*^5GSicnm2ErFTSt{cc+CL7Vn6a1tN1U7D8C4`SYLuY;U~r zh5??^yIQlmt3vWag<5=)xVU9j7yvDM!Yvud%RaX$14M;Xu0epdY z0fJARDs~5C)~4A+1>kHhCny|OTgv4a7&%Su#F^Q7HfQEIo2!URFCp#7Q3qTBl^Zpx zuh)e%7x*h^SB|u0L!qEe9BH$%^A(TjNxZ8zZSqKmKcizyw3!KLnU(0dWkF<~BxeH{_!i(A__Wkv__%Lmj{(vPVOq#-Y!a)pNUbrCku-l_|24FX-9J8o_xx)&EJ7^nFvxxajKuVn{@>|yL+#| z01fCz3{AB`0<)>ZIDxnTvjs{sGb`VYNZZ1qGro}xEKMNY+M3kW(S$!iV01@0@+L~x zgbr=2aifMlNn)aYMXth37&4R&c`MLH_sLU5p4PW2AB!5m5cM?KX(b0WT%l70GN(v4 zUaLg8qXi|}!s2Bcl%JylcT4NZkCnbmeq%$S+UPEoEj=?w8{bzok@?C-jp%Qq6*(vW z&dEDr2M%Vsi!}NH6hHgy)4p)QrZ*9X>b7lKczJL{K6hS=5Je>pc262VRFLo>pXgIY zApLc6=SJ^tswXyqP6&c8)aE^aHIpO$#)1vb#kgMci3@Wn;0<{Gjq6izve>o>+}61qYEN|;=0qvQX0kYF(C`VrYNpC|y)=b>I?MRTCwQ8shH_(KwiQKU@!IqOb z=W5zi3x78k>SA@P%0CAXxhmJrBhH(;LN*+6MO?i?uK`^2nL{$;CXMx1e+Llx5$hk4 ze`pGv)8bvhaU3Dj+~E^=Yr)I&Z0_^31Bm?4RIa)S1rWLF*3C~bXQwn5I4ICI_h`O5 zu~WuDf?x;Afjt^q$Cew=hq3C}Vr@zY=*`Z^v7^~Jc3E>3z$n(gIEIZ7-pJBGo01(8 z+gpc(b{4NOYJB^6tD!kdwBEotAMg~Q`K0>2qnd*q(`GI6Nh_=h08Hc z$v~&LF9XB!a1onqfUuMY7iP*+tc_990BC*csnUn>fKvg!67=?!B)R9d7SId)8-dZ$ zoGG@<^vA%`Ss6#&^%q_G4e09-Gv94`^C73$?~os~xFlT)Pn`5VJx_gkzTBj7U?OVA z-M@mvGVZ;k)v;5uC2GzR*Q$j;>{3{nJn>-4{yhgB#%7a{WBU)*Yi;!=haVGmZQbP$ zys&18t1tj^QpY4~+pdjkN?jIdzEq@*E?lXP3M4LJGfVUBQeAnFwe-Qs+SX=|j z$q53bTf6HuTC*{R7C8L_7*m#vv`hyNa6sm7%w?H-QBORC*(^?TSZR;Mz%k=19*-@GjFx%)km*jR;;HvVhEM50Bz~d|NW-@>A&Bw=N3F~ zzy0lR-Grm^bMn(VBYg&IxH!GC55hWbsy6i5t72avtm9U{`UVPKe-p%IO^E)IwRzt3 zSx5cpodSI__ZRwKAlkiq_qsbUP%p6NNNt)&U^;Lx#}e6-B4%Y}c^d-ky(~b4>yXvTNA27KOs^Lu<3fSNLMZrqu)x|7G>Hn%8CI$}Lv+9UwfQ)9Aj#9u&|QPP*+BJG5zQ?c_cR;KtvW zBa{V^0fm7s0my*NfO8om0Wy;ofE=)0QoHsJ=(ZJ13HZ$M(L9g50T`!cWm=x@5jf}q z`0?FTr9V<{-Tq4VwRPLKTeeK`fNybB?J1zKyY7$V376mf>ZgF<0Qx2j8*adefQ^9A zQ7BNkPsojvbKw{Ycsp(M80+7&hvzA_U&9%i^JV92k5dn207p)dLoZIn1i-?j>(<#a zJy?WZ-ISmBmaUY(CJq3u+rCYJYNpcF9-8K7Xvp4DRPHEGWj|v<J>nJ&G#)*;L>d6B zK)yiVI1J-jO&P}y9^%f|XdJ6+*)l}`W*?O?5&&kF9G0{4aveO!!IrTKf2ROw0^AH6hH4w+2Y251R;>kNpLKU+&cpA4C?gP=I-!jmz4?ig_Y{qvaKw!b zTx-+;TH_W@`@*R>Uv&XgOxh;T#_1Yk z0a&92a4qMEu5Id`F=2FVca-aJlG+IA)KXbv+kI?)@AUH_U1vb-I4e^(v=PeNwPVLy zhjVSv7vSdnh2UIVx;Ylj28hAxBY>M@*mupl*#?iRP17!Lou*wd79^kr?SShKfVrF6 zMGMs}*KX7Xf$*2>I>eC}$L$oN`W>9wf%#(tVDo;#+Xpjbf~7Vr$_NrU-Lp$0s>EbG%r~2gD;*&D^bw!;jVcIzD3OE^OVjQV~@tdh^ z(OTaTjl>=;fM8Z*-&o68`tx=Aw&H>Q_*}s>Bj{S`951fG(q32R|W8-Fi+0% zyN>GAm@s!&2NA;KRUj5bW?~8~PNzZSsb&@p`soQ9o&;y{A?gUDTyt|J-VcaO=SDl= zM!>+9Pku+BGbgf&(x`y|(l`f6am~f`HeJrV**PcVO5D>1%i$Jim2g~O z<{>!~n(6_*#IDXvancSrod-KHplz*)hmU1cfEBedEocQP88Z9LCz-% zPCMjEpVGB+lDi+{)=d6S7oGL=aJ}uXbO1mJSFT)jr{-OI(;Pa*b+|Q9tRn8u6%F~? 
z%-*%yr#rNlyNK$jWs9cnhzwi~q>YPl za$>w7)rQurX=9%-qb*=}dS;#-mjhXeKwaS1Xd>H0kU$eT8>eVv8aSHo0`GFJ*nlJ0 zX@ShA1@HpR9+g8IPQ6#J-B6#eO$}{I^Qz)xLAf{}Q$IxR39$iKc&GF|p}YVG6R9U6 zlj_)jhMWhW7vO=(c@IIj-MeZnO^`65!fw zt2PZRo~+1ywmRNx1Be{k!k?UJKyLUEbR~N z)_egtE4rQ^w3g^IzyA6w2XdZL-{t_17M#hSyF=>SrHewP=k@uD`i6VwpB$Br@xN04 zPV{lU`itnh>M@!Z@V(f8_B_Pn>7tG6^0cRmHl#~Xa<}F?k%K_gmY9QZEa`9{X+`-h zugtM|h~`M8+)Zsh3emA%S2wSp=x^ck&I$w&`Odn8-#!`z06_tFi}U~>eFvdq=VW&pj_sdf z&raXMJ&3iY{G5D`r`2zr(f#+j#_jasHPlC77J%z8a4O(%Js@DksPBl++ZE|XCe6pZWW94%K zov--);@KL{xNB&f9@1y-Jh|3G~iS(ftRJL4|4M6(*x$_R>(&v&E@KjSdw6u|XaH8Cvfq)syNB2(BE`_1> znt6@qeGa0HnBjw>w=bvzRorjeR_2Ki@4-|3JA>#1$C`m%T9B z1NYxpa~{U=#5{pLI12+Vvp)ln%Q&9P{dl7k}{qbEQX%R_> zF#*SuMq9xWICi%Xgs@)_1(7{4-VVxJW`R?1aRvqkkj5~DejbRLz1OmRKIf&(>}PO3 z;AH?~zRNw4G5|MYN<{fNH~FW$XcHYn+0cmnDnMxdhW@0@z6kjXp>h>Kd+LuiNgqdB zp}rXVu}?*eZOfILyKuIk!0ieI5czgJ?)5LOZ`>%4X>N9`P~(3=WjJ2?I7VLvu57Fb zG@L9@v8l%KfRKP-1Ryvd$9#SN-6f0VK)OW0?@*g6;F5b2&>B!5Kpy7;JS+vyf0M^5Cw@A=gj_#!Zn%oCDFW@1NAnTPtji=5OJIL3hVIz0?1gzwIxZqMQ zKv(YBIL9Wc45Y#SS`^4XPNBeZK)3+4qz|wft>Z?=;yijvdBWZJRB@35bP;&UG2F*@ zA4;>`X@?vvY}ZeEgXI0fR5GbIck9*W6I4|%s&~JhOc+SuG zgSBQhTl6X#bXT$BxUqZAjlzpjj~QsZyX9J=j-e zpAmOqVAo`&0ifPQ*EWzU+TL#ff(H~O@AZ{#lSXQDsxKUy%LbrQ+}OF6xQ2m(aaspj zj>>ROYda<9&$;Vx&hcZe1NDdw)EPREHV`vy8vvN=Hu^s2q)u?QK9rH}z-^lrE#(+4 z=0OjON3e2~H~JeO*L>Cemsm= zAa7i+1jb4ZHeb5z4-0TSkT`jZ_Iq)=CSPs!z(P{H_}kiebY8%J9JXom)KT-sO~iW2 zYl{E?KmbWZK~yTVav(W6hJK8PFQR@7SekMHrc>Ya$2^dLZuF68#&r>^S(Mh@K6y}u z{G2Q**1h7KUjsclb#|9>9K2%#*D-ZQ8_wpT1C^gLaII2)+AVp&#k$SS@^?_a_G4nm{Ss}42M{PvPt7CBx{Jr2JN$gR0s%z6U5~r|iw=|pGa|1>TDWkbPr`v(nFy!o z34j3U-BrR$`C|kkfAPf^{&Z}XZbEc)L|!Wuua21~$N_y>yx=ApJJ_UyCf*N-%)o`n zf(HvNfW@RiN6JDCkx4Nd2+_HGaG?&5#rf!|^Y`C>--tZjO(cJ`D;7uSI7yGiC*a>G zy|SY6no6D$W|4;DDsn6fEG!1a#wc9|d;FcUu?WWECV8hbX7Px!u|PzO$`*YW4af(P zbhvgn2YE*$7Iip=#W22489C3rK;$T0%1T5-(qtix^eJ=f%_7QA_`JIU=*k3wdZB;& zM6Yt8f9v1Bzb`bh;7MBy;qfX^-9cp9*(Cvvyxs&D_0lC^M}T($M*>&s%FPN_TR>D? zk^w<4UcBPKE6%;qwS8HzEI=!RTKads%WHlbXtTI%05Ik$xGxoP)@5Tna`cUkWdKUv zfG=@J2KHod@BO%MS~m2*18oANl1`mEwHyG&HJJc>S}a%PqBG}wB355zp&U%gfJrMF z&<6;a^WfymlX<1-l)qUDAjEx$0rq)qK%bZ6E8$bbhqX@U!fdE~=SvHq> z(wwr#2HFDd*8taL^(}iWZReuSgC4XGbOw~h4LHgl-+Qx5b80xoq`enu1VhMs@4Lqp%_*KyP(v9UB`vQ9Z;H6x$Y z3*bDjR62j*vb%D}>0BIJ3f-0J0swhK?S$h2yEzx-zM*=jl6=!&WuOhxrg8VCJXBs8 zX=7!svOfn9xyn|}Bg+`qBG)f{8;}ri%Z9a^?DJ)7tf8*Ukz?gd(YKde6y?sRhd&ro z(tnf%k$dR%>>ayw)OQ-n?XSdN4^N-L_0*Ixv92#bRN$bff4Ebj$P4}A#q)AW&@0Vw z0OZ={I{;7hHJ-)6k-Ytw0N8X%*FV>D1T4`{0G*cTK1CV0|8if&6|kvX?iou*0B?Ec zrhfqe0PwFm}O76Hj+)5G z-y`rF&48y!mq7oA>mu+}R1TaLxrbd=pN#7x>99e-n8ovZ^H|a@dryhY19>I_pYbYt z+5pGX_L>OT0+<5Sqpyzw=cT^}p36$lv;+GN*|tsF1!|qP>2s#qlo=Bp#^Y7?lpXjj z8aGAP0qudhC-0?m*l&F7MV(O(^%ZN~eCR7WFP~gL)C;bXIGb{RYozm_da)J(X$LiG zXfa)RZlbzxqKA=aO9a2?@jdRRS{%kHGYXBhn9i}}mGegVjs6}!f2cqJk$*F}a?-jb=(MRI?d-ZB*U(G#`{>UG?Q2&cdZUg2e>U+2! zl#-=x}dwIdvi3M1*E=5^6o_?7Al=(^27vAanJRrjBXPMLR z#s=;`Xii_Pf_v$>|L`zwU5!r|8!%3Z9*&99q;KZjxN|YK=ib41<;S}Ia*sNvu?qD` zKV9lPtouvMeF9yH+>{EU8AG^X%0)b?(uo`INaYK{uX%sAnG=I3S8 z5C48}1-M=Ui2UICmHj5y44R(N=I^3Ki+nA&ckkZ5mfKYhS?nnQc7{;>6rdkve}{b- z=7~i1Pl$J$YepsId)$|zPoNj}aM+vTXYNl?i28AWVE|yh!+pPW-H!g3H<@Xyu0E9Z zL>@?kaeLXkkXE!8#2ydnkq-A?jwc=R02s_34tq^R^2RgWUl%4M~h9Ht(&dq$6{4?I5yc`!D!|zGo#|ru`IuHp!)8sFN%2j|d0Br#`WcoN9 zwArU(zlwb-`Z($~grGoGP#}QF_g@QuWtY_7`u$3u?6EMHA=J`546u%V8@EyJ4>fgv zi0(V=W0VDvr;ZvaAauO7j%(p>-{k)FmG)6QJ?2!*1z4|CngC_ompCRia1Xeu`-$gG z_W*!c07=g454GGJR{?@@41Ew0s1LA_bu$1wfWHfx+wr~Ty0-wm0l7u_=bXNGqjRAF z_ayFraZQ^E!Y&06WAg@}2oMR-$$b_ml5}x2C2;=*nj_*e%AB#5*65fQGS?)(m%qH? 
z$KKC>$OAwzk#nLOHN^Rtj{`b&5OFMgVzfne+fQIw-H?pLWN7YT5jHi`RKCX&#MJ zxQ}5}$2V@aUCvoY^~g0q`{jB->x)+|-&)I{4Fd0zU)m))`KPqx+_BEZW%c0Sl%Mt( z)dB5|Yll5^&X1Eej=OZdbyQS;)IB_ONhs0{0!oK;4T2)lBGRFVfOL0>I&?@Xsie|9 z)G$a5-8Bq3bPV0~UcS%s`{!Nj`IEKaa@W20bI#fO?6VKJZdgp!ubEOd3RWG&{!LxE zdgZjVWHa$1R18aG&6+)^aULAFK|qTmQp>?qT2ndDma7&B(y0$5tmZ|E1v@-^Ri+A> z7_oc6G+lMvv~}^Ez==j0Vt#lL^|eQpfZvvbGs0sl5wkTpPEXmVG%eH!ZF+~FCC^XzjOt}qczjFyf@O8QNVJqf@& zra0!*p--PV9={o{*_v$1I<&j^Xil>d?LJ+lBHGz+(}`qrD5&1OG86S0pE&JJ=6|*2 zm=#4N*ycqz_3?J~B67RJ$}+0*OK(iYrFnSd_d!(*LhSh*!pZ); zo+cY>635{l`u9f>MVsPUhG!iQ30Yk`+z)crDN%Xr!MU z3!jI?XW4+=YUL6oBqeVY_D=ZQVMQS&|Hle#xK}sH+Crne#hXlnNB#Z%#~OGeV2%N` z$K=ZS%)?0`*yVgyRyyH93Ablw-eq>1qCHBvKz&w+JtifvYa)8&6pT1>BBADUcKE}B zG{1Wgn`G7nCrOq$CWOac7M6^*F5$R=?=y;%Q2&gH-{Zq^ z2XKe5dhjaQggj$bX+fr-x{g<&`u#Z7A_La;q+xiHU$!jL8E@EA6Q!O%jVZGsH5+<% ze?g4mKFMIPdaj~TN5Np>J!{8InyxE+>8xz?=&q1-kbe($cV-a3!M+f}Y9;FkOFP7f z?(lFy?I;chbJTwvQJdeNHj8iB@l3(TH9+=f657XZ?)Q&I)~qMeITh<)jnn3!AGkc# zUF8h5k2?56R3#afihay9lVv59Gapr!c}h6)aN{Wg0*N&hT{s7I5K$sdT}J};NXQ&0 z=Qn{=@Tg{Q3A2RH=DlX*IzM&^viiF~G;ZFw*E&6o4|s0Wl~Aq#^4kVy9r;$skD(_1 zg;655d&JY&&8YJf!Q?;uBY^|9F<)}H_e2?R*1Ke5F9_fQ8U^tqPre>LUZZMl@{#gM z_5@>y3CL#@TM?20v5caqZ_$j~n7C@gr*O7Jsrx&ThuD9x?&)^wM(Ct1V{ssvgnX+c z`@6m2&q;FmTrW4%i9xb`=eAW}Kk1LD@YcJ@mw0BDT|L}9t@b-of0aM?z*J$Crdlx_ z@kG>tMJVdx8G4n6yIf+DY)_iJS!f>00HxVD&ke+*T!UnwOJo)UsGB1(E5i8^ECJAznRkuYupBcv$!pFSueVt)1w{4L?7B|7D%e z;i}JVCt!k8STG_&qxtaR|oLMS33)$o*A&ULNjww)ZY;4-|9ohzoQ82(7M()TuqE;z$5|LIFhY24;molzB$` zRszV~3xnDi7)Kv}U>rceVHIUhBE>)O0xKi`VXut{kTJ`yJM&R~5cit~TcJ}(t>3?X z{hgDW`|@0pdq{xfo6l$YNkB7Orp3X8)RSpm4M{D;5}ij+JvPtnz&{@Pr6l)48++|h zvl4`C`e3E^TJH_K8H{NAMq0(D^z95^ss=|^!*A7KVeOe=FFE~F1eT3SP6!~hx_DkT zjLo%*8qSy9aCT%bf2zf%E6*+@inY$rAImJac?>pnjt222m>pg4vVQF8rR?WROa`Vi zX%Cf2&u1GvigsojCuoU$UsoYNUzxb2x;{ z3-Do3S^2R`>p(Cd)>HZkJZN~rZ=iO}O8j{POON^$BH^Nqs`Y_5%v82jidJ5aW1{G0 zHjd|Zj*Udk+A?$Xzj` zZv`@*JFjVljH`BseKU@Wo-4Pfz}JxGGJk9?&3@3-YKgVA57_ znAu1Qy$~oXSwn^)XQi-4!K1;0fJ@!Jte5u1n69R0snTvW7P|mCr!&S`!xqwxKIitQ z{L!#qTe^i`M{xA(I(~A1$Eop)w_V<3lUO496QS2zpu+T;;JgwCLIfc(F5X`lz>fq% zJ#e#W8e`Jp;}@NR)1^KBwor)_qvcJSl|J2_^A#STzrC&d+)uA>dO8xY2i_JO>3Y6J zm6TR>M^uvE32Dj2d=guTSPydENaO}c;9-aPOL+X7{|w}{Z20LNlj+Bgxu8r7Rd=5# zHMJ|!FFPS316yUu2iR9ey1V9N`^dL(gIV=U>Ldc-`5@QK;jSYuNOm&52S3`o3 zkPw{P$Ed;mPa`cTbcw$2G$f1?cNLqt$~GoPG+Q)bo5sw$6>TCP#YkmA5U1Nk%r>=3Qb3mUy}1Y}+zPyYKD(5-t}nT`qcC;u@B|<@E0hk+@aQLr9^j0^c)eHPwX{u7dLpsAG4! zs;egd+BJY8WKpS*9(bztfHM=~`CV#|173pbad90>f*tb&PCMUlQHO$g?F*MAnSTct z&fqt}gVe4po%@o{S(mffu9v^3`;sWH)~HJ|<6tr=W$$@M9`b^?Sb5NHSqumtpQ%DN zajOgEt&!%)&@ylzZg%9q&$^y}vxg?i&Y!vDG>$HbM;`KF>Z43uIy?BO3G1Uxyj5Zt z-sN<~x@RZVTR+8=g)!R_jMz^X@NUEBLT`B7tVS{dUGf|rO z!r>77bY-n}2NsmpE92DUqMq?{+EWL6$O%iW zUhkwqGT3!hXSv_B&;vtq^UAOJpJwn56;?+uG~nX678U_D<0cy51n~(PR*%VFdjGh| z)9<&-^mu;g53)HReOaW!f9$qV1j`J3TV|#!b@?P(u?-*j<98J{vQFkwBMdz4Ck)-+`dzX&eya+q;w#64h^ddzAHcHyt#+{B*zgzfOP zHoO+c0~Qi`j5G{c%%HMugCqUCn9BPZ#2Xt=t&&dd`YC<-{VlOZ<$OKrFrT{{kJs4? zb_H%C+OQST5OX&%m(kb;(docC`HIfvoY~5-`QGyN!eyFZezIMRSO1 zj#no!xDLhi_^PfEEbS^lTaHzC{v3KyzMMlI6W#)LeOSsjzql49{ z&SY+_%p(tzi9Nb1VRMW5cYw$SHgU5zK>eB+h&-RMlM2Aq`d`Bz_?4zI4tG^ZNPX<1 zWe|R^ykfDcoNNttK%z%aGhpU;GA(;0#ur^*mA(c#K22Px2OZj z9_7MmB%sa&5 zSjPNf3_eeczH(#i0HUjC~?&PH?h zrjOqMnPj|bE>OLcZW23n$vv(1Hje3GA8~r4(;yb)h0AuwbxkwM+$A{LX?}tYg}8st z8=U}hFz$)3)$lilyx*EETN8490{0si!ChJU9!crY67pw4HwJ3j;8jaqVuT1dGxk!N ztFS8AnX2tooB>8R?U*M`*6a#|ohba>J&2feUJi*)vWHPWK(?@2<)* zRGD?Cb8K!1#Hi|a8JmZ+FLP5XueKrp-((xj-xopsUN27xAy?UEsGj4_>1AGEx>ao6*|o*71S@`IaB*0`3V)Tvpd zv?JyeUV>hy`0awg4y|I+?b*sCGp{ELz;}+Ic{p06MZPs%P=HDxM{XuoY+Q7*nN*Y=V1Y`ZsOdi1OX6V@OCr! 
z16PB+5f0AnaaqtYp*D&COiNG@r?~_!xx=s$+yrX<@#9A= zumKxVLjH;33kTK0Zp+P9i|?}JK~h?povcoj<-Z1xEk@a!@v^{`n0DyPwUregdiA53 zI;UMN`ULX@8^#186Po34TE$*I&n4OD(WmDhDFJ6YLRM0n{@o`uVDbAl4E@s|R=ve1 zd!^aAxwv{tihfP9EIeHh_jo?%O+^_`dPrDZO3z<$v5Xy;F7J4Ib3NC!f4*5%hP?Co zZ1}x%ExD(c^<9539iO7!cfuIqara8a?%v#ZA*5&EE6#W902f99VAs-H0b_@(tAKFO z*b8x0vQBVi$xJQvQ)J(w7NZLX#ra5w+Rczj8BET8X^hAp zpr(UGa2)yv!_RF7A8wB!axJ<>&U(_oZY%h&X1d6*aOSZ2(Ag+1SoAnWAMuo*&SB&~ z%CEKWH+YO;-g#h@CZ-g{U6YbWw!KB+{=6m<{DF0Bv>+ltUf6MomO=F0n8t$ioT)cf zO_gX25?@M;Wy%l!%VHA_p8*2eZO@8BUC5ocK!Ofr%5S7W4S2}a5c(Vo|3gd zfNn2G!Tmg2`sTCcJ87O%2ZQpH6lWSrbIsB&D6>vpOz^nXs-s9EkPXn+mrqsY7|&5=fpb{U#NH8DP4z0;{KBI!Gu@ zK@X?*zeJY_rE(w2)|x(ZNwT^vE48;hUw!k^Q~i7lK_ekGKY~_>nB`p`D@i+5^PAhd zbuBLDK#aOHRZFdA)8c%>LCbBB80h8U-0UTvM)^KpJ@25AkP_ULmmqJD*U1Ol+oiy) z?o;z*H;IDZz-sf;t{GOR(eslTr|oZ>mXOV$;d%LZW)&tBmx2<6+||oEJ*Io+dCfjR z$#^-;Tk`5TJuisNdoH-e>C9LNg}lETid+@nFKL>?CotK}$=!Bl8 z_*gKNOn#+!=siVjLc#KeT`A&vL&d&%DutImC6-0J@7sw+&9?>>u}|6;Z}%Yqb8%+> z^N$_>D|y85oq;`OOJqYo0<~Kog1)X z9B6_=p{JF@{U0TFAcxpP0!Gyc?j9(Yo2FS3e)T<@d+Rrs^S5(1dk_Vv+F=x(Xfe>R zA;Wy<23vwqvGNy)jIXzgEH|xT{XfOrB;K*wF+xIKhQztKx?VN$$-edk+Xvg4q}ld@ zdKn>p*p%K}hm>6E(EjGsBX6WPFept%TIVqHc0uvar5COtjuBcUnvBo$;DgAJh%c5R z4jYav4m+jKb4DWQ2fUdl20-p>@mvM~Kg$x`AzRhL6`7Ry{in`PDAruD za!3@;48SfNY0`EPq~9;?c(DrmmwVLU9g^7bEu)fD1*kDB-@~r<;fZE_uOf~GXtITx zcss~AC9o2e%x}gj37ef8Y99_3| zHnVp`2W5BG+NZaR!D9ehGW|2j&QW?|B3@=6#GqhR+$c6*@2+s=08~RjIgeiNmDkLC z>hM|Pr?}aQy+By4%ad$Y_M_$S1k8hGbFx~<2~*`~U;x_mE`9djkQ$p<=dteIM%9%8lsJn_DWS{o+onx|4bCf2Lx6NNcFkytczE0~tVYSIBl z+|nT~FyBP=&k_Y0wXj}V?pzt zg{I&6dh>Jaa+;pJbK9c%a+n1&u$0XMA}}a#+&237{xVtN^>AZkaS^H<62rK(lbvAU z`jpLlJ?+WbHY(j;aR=?tgd`N0>0f#k^0B(!ebmjBj;Isu6;(Gi< z+Cw8=w`Nl#PO#i*nKsbkH1N`7tuIMQxLhvoYTc#H;md$2wQ_!`-?~xuoU>*=lTZPN zbeuqjKEJtT(5*%Mmi+rZp{>c5GET2oC!SaK(blsX7dCHWRz9pEu&%1tQffuKr@(68 z8ztQug<2v@Q+E}BahYc7#e)B8Apr82-F={oV>EXA#pnmq1^@UHJxosiIzJ=Zf|9GO z#k~Te1Tacwk(F@6FQWK<_xnwBK!;7)=KeBYy4YjwJ90?CJWXYvW+Jz=du;o+dhJM6 ztdZ}4jue|$`NQV%`f%E?3!}P@BepNu@V2)Lf00ak`>ORW)`&53FO0zZ!0*VKNlCzn zcY&pLfvp+1t863>03(Rs%yuVmJ9I=);Wzc@9cnY0hD&JUT6nrXuN*x3d8ppv&6T0xa!zT=1{AHZCJ5U`hN6|-vnL0g_G zQc3tDBVz}&OCs20A*qmXD(--kEN=n~qZFtmLGaXWSX0Q7!|>7qaB6G$%Spm3{vjykpD$Hv+kBYBL)jXk`iyGuhFN zs#rDwH5KwtLc#W4t2?CHi{xhpuo%2QRj$LR)^6MRDkw{rcce?RdnYB{nT?Sso31)KUo9g_)th8lP(Jje$ zs;H=e0+LAFzto$kNs|J6QU<7-%V<7!m(#N$od(DXtG~rA+f0tgcHF3=TD?}wNu<7g zakhESRdjj*Fx8A&PEEn0(MppuBpWkjw2#@OragDsy)xbB@bYy0RsEXaD9P{O@%VGs zKYq0bTJ+{-^`CZ|_A{GHQu!v|jkN2~E~MAbZLZik493Xa&^xTkB@}2?s{W1q#zUel zu=Xg9+SxB;^LiIM&1D8NS5@?0LW=)1Q8VBw)nAHlo>W;raQjQ>f=u0RMYLVZUNudV zkbdx$ftSA{>*QMD4HGLhUxv%0(b?SL@@4yQai6T?6KS`bD3)LZ)Bs&NtL6L1L%p(g z2Z@+BxYebE`tN^VElpV|ZR%0{u;AonK%E_{_H;<3Tw=dxMKA!U4&meOxVfSpQC#qZnggf1|WQx;7z$%DZp}b+b<ezkwXB)FMb`p);eWHiSlM?isM7WVtt`)!4>1IDc)0{fy!DQfdffs|%(p(fXh~|J zdbJb*%s}6~vkBL4qIZ3%!eq9GbqCm!1VXNgiXkbakfjv!!086holW{2QAY%L28?LA zIWFTD79KXG|E=Ev$jB@Y7(x&xmbqV*oIZZc4<)cs_bJfK*j#zP$i^q1s4wbuE2oX? 
z5MQgrG{SrV5Hy;jP(4beLf^ZBZgX4}p#wZL?SMOEbo{3Xk}*nv7w@8q8LxwwnEM$k zwC3$lB4?AuMob0QJ!dzgUESq4SdSxQn6$FQ31)>XgkWvbX8w6^aH}tGY)_=|3w!58<69|x*SU!zW^sq9 zB@4IjtnTTJkSJQd?yQCa4%fFYk~dj3z$fxEY>-dL4axdvhksc-hW{N{1Il32Kl-3Py`r~l^j5*`yq?_+TuZA31RKXK z==V{^eDiF~((Z^w3%FIbAyV0gB(Rzj6=>*gg=Q{T47ynhI&t4!2~&rila+D$u4Q=n zi;y9DN8vyZ*Nv4<=%S=r!8j2d)nR#PAcQ+n7NIK z9-qa@?@)y3s`~xx>4}rwX(vCg-MU%V{Mo#zQl+N!?=w=)=SQRSv0rnZVp$Dtk@Bp) z5}?)Wpb<2w3?J>BDQfZVZt&dQJk(16yi3JSL04X_n$S5>>7fR!awsRIGrZ`GoZ5BM z1kV*8@%e2H?oO|%=}#8|ot5RO`=7Ipoqsa^qN_7c=tW3Lab)pH+CDFca9Y`|Tz#l7 zV7l*vNb1#3JbfUeYqI3*T4E%|FD#|Vdcc?_dkA3_(d>s82hYi^u%Z+xoTlNjSVQ%> zXT@#h))529^TUy~tLJD`iihJWaf+asrvWg9LgEYG|84r+)6~zsR^L2mIwRO_tay>a z4VN7aHLi0sg+z9B3rneLc2;0foVR(=AXRp+XBCmP*uAy{N-Fm%Jc;%zAjzK*pW|GV z^8+t?>Y*u&!G4d;B!kRs`CQ9sGhD13|9FnS4)*kcmkg4}QWZ88m?V5@Cli0;A@T9w zODSOmH^4w}XW`vUjYI5??qwSlLmWdGOmFKgS)2lYSff3Z-J&6PIla2s#{4hy76~kH z=U9I`Y0~^VZ_R3@Fsth!^dlx&)@B{-)#-@Jb&-Eq;6iPDe)C2nmV zrylFAMUQiZuMhpUBC#9v_$4ClFxTGdv&r*C=FY}OX6r%V9jv@NTYBce*}8>F|3iJb z#+$fwQSO9>dYH_O(UzdUc^t45xSVeUCuxG>5Cp!u;*x#1Ur(19Hu9`1~4DbxUMi258FExf8J zN!yV}Gv)6>+d6WijfKv!K={zr8oE@!n^MSv$Yj!$oVtAc&BaZRm=Hm8godI zOA!75*Y%e#Uk)Nx*UYLuMk`IL#4>xVS+Uv$F9M#LPOT%M5c2Oc$2*Sdj3i$eL+vSB4^>!QfZ&6&R^&f}DLUrR`0iqBNU`1a$2WAmI5qtr^ zStoU7S>>?O6C=6PI}o)1?9YEWcJtSNIaaP>HRj2y>X_b}hkkm+u@DawOM%1x9#bj{ zfmgTGGhO@C`X_&V0n3{UXqU0v;x^Tt#dg;U-svv#$|?b{!KBz+pv zwZPh-F~a3`BB|h5FN!katG@upm{tUD;agZ-CJFOj?W2}*`${8)DJ?4C`J-Aq`u zIXfAY#eXlP;1X$9CgrWcz#SBfNNhB$`Z)1dkCz}GXaaAa^Bd={&jTGluau9}&)nSH zn3Qi;1cQ7S!kQdr#5tA{?}e$?Tef9@m!VKe1wm~w$9_r4g1o%*Rmm6xXatUKFIQzM zTAdAwxsXKO+c`@&IV}uA<=e6}VIn-~FQAQEXV4pZ@N!q4zogS$Sk%3HtO{R^{L4jz zZt7VJOi->Vf`+yLH8+LT^QS6*idE{M_R|cK{Us&gkzkO2@7z^OofCG`H4}Jej)bCq z+<}gD7Km7Wxa=$sX&cP7ZEh8PmAZr96zR7xO0Z}=Z@I=8hqUH7@72*~d2J`!H{-&^ zou*Mt?!1712hbC76DCItAZjIUPaP($+fx_CpRu}Hdns#HSk|A{2J{NZ{LHZAw0^}X z>BwhLt%H8?w#ZYXNUthcsk~-uqPFVdGTai+Gyf&oaEgF2;LNrn%`Wj^TUwc{E#UIl zHMo&)A#&;mM3isGqQT0Vsm7$WM1Q649)jd*6I`?~gRHTmbJA^%rz4a)waEmb+#_km zj50YySVe4)?mw0Fy+ZhfB3vJ0a(qEB{9OxdL@II+Md3YV_ zc^r*oJD%63Uj&V_Rb+5_svp!_K37~;x+FISdXquDTAQHeT4T(_%GX^RH(`fZ2>7zl z%o$sOpYS}5XAJJd`71(wq4lq?^B*BuNcfqBj~g>ER|>L!_J8p179Lb3=t`Dg;a4s$6$~*J z2O*1umjv&aPFt%<=kM|!a|`y7i)?X^JXJhF*>ut7L`mAIOX`EK@|gWeT}12bU6XP4 zm?((!e~&W2P%U)7z5}9#fLeFU)on>kKj?C1o{yWm>qQjpVgmERaC>_@Xnj_(?(abn z@aS~Rv@)cv;0=KHf`rqF7V1m*m_jK6+TQsY0jg-CUq2jFkQjK5JC`o*@U!tMnZRkJ z)WDh3`JO-rA8q9ylS~fRGP7pJO{*mhP-16?&+c3^=BYXi%M~l$YX`aFEPQ?m^y94j zQ_v^7^Sh<}P>H+lUq&j{H>~oI5Ep??EP5|1@O<5T;LM!w7JpD4gHP^`t~l%X!N1M% zBDvOAqnb}o79MFd#TF(2OcyQSPpgYZ!6H<|okigW`d7nPmACPEd zN({7q<~=VXOw=)>%gmi0Z5gIEY|4+tLBh`)oaSM3EsI~6o{isub-g;rX9IV+@oyOr zu!;hi z5m#6qhtE9Mzj|u*ASPzpuoytbhRI) zp3)#_-CcjOqtIeqlw9T_bRcmRp*>rMlQCavI z%N8GiO7>&>FCF>rFFhv^?uw#A4?WhlnI=WcJfaLdPJIYvp*RO^uX`v4HwiW$eQ%^eC!2b@u{?1W<-&ecRT0@H9?UP$b3ZJK zL8KHB&*2JiwpZT;O&UUSX#~x-!2%8m^XMpGkp%PPJD6I%GsPU#B}azqbo&qKu6)2Y z;QfRD6jfoV;&;B160rYWHbFiPt-=!E&YP8^5?c!_1R9_L^%T86sBm6aRSEwB!@AQIVP! 
zPrfXd%ez&`yG3y*v$`Tf+TQA;KcF(ZqFAhQxMMC_P4FP^s#Uer>RNiA_>Nxii$5RQ z1E%K!Ob7`Zb+xxDJ9=jqp>Fzej|ji;LCnAo@CATFGH^%_ry3<^s^~YjmwvJy6Gz%Qa(c!Lk5F6@_Ep?jYxZ(xKmb)+g zZ!CZ)0?QlTGo^&J5;!@3*hy~2xu^0^8WUkB_%SK`#3xrvy1U%iY((d58C8!lK;x1S;W~!ReA%SnsiA zziA&+0jK9j+QYPKNsgA&IX}$fdk@u@lVIR=^}BU@Ciua2F0VVE)1^{UoTRi|<+Zgm z{S!Z-s+1k?^1o&dcUGNxN# z<#VmV^@bK1bwPfFhK~-^_*ftUa8Pj4|kJ5%MCJPgX}g?W1m1LBw63m5lfU zF@?{@NFMWWZeby!_Xemc&nv*Niflooj(p~uK!Ecx5oOHn9!!^VLv@^W1tWkiQ1$uY zTB9atVwuR`>P~BiLYa7l1s;#+s zS@`MXsG4YYp>DG+UNH>yq)tLI#FbbmwCi#0Bp}?((J81E}D>OmpVN+6t`~WQ*7Fq z?XOE_bjg7Q%U0eeP)|!qaH7HcVwQO6@VX0kD+8$}pOwE3Gfi!`ZyPoF(_{m_ zlhO&lL{?L2&n~^qgk8S94!+nn#SYfzGj*VYh%%bsbD08S(ln8+%X9bV;N>m(qx-j> z|IZ6Ru+fpFxe+lJa2S9!?g1$J1Cx<0b;m%z=RpD`Orfqv(eR{~$Y=6I?xxFrCS9T@KbBgYQ-2FlIv2=j5)&%=ub z{qJ)E6?VMZK;nAqWm7Cdi{Ck=;xZ^aot}Ic{!h`*?{Y;=V0cYWZx+=XH zzQLPxgbk*Uv-P1pA6+1cLh}D7DtyKszhiC`4EmWQvn7gbm?Y%nW+z=JDJdO*l@DZF zat0BL#z}ub=xrNvPI@mEOSfKkL`p#PDne2(ySGwhZ%h`Z&>dV-r= z&!OB)W|m{GDK$ku`Qfw%C7eLA#V%erGhYCsFtX_1QMi05&~sF{!Dagj#Ed6E%y{)c zIz8Wrha&nbY#7Rp6g>9Z)H*YxkIuMC*iZOW$`kbqLGI@jWq0=DOug?c|2Jvhs1Ci3 zk$-G6xKlNj$5|5_{pn;fJTa7_ra&cwK*8`#>#kwJ2B>8iIdrR8;iD@}o5X|(lQ5x0 zI*|`yO23355SF4sY@jl7LpK1rSf!5O4!&*S{S`0CI4lfMPFF+H(-~!e1wxI7OG7g` zD&%QG?>^%+GXg?a&4y8!}oPc3=bZb;|Wx&`rUj zbDxw;A?FSh3N>~X_4#Lb9xXAq_maV3qWFi>oi^4Z)qS<^$BT=#BYKOYvtIL?_fD`s z`Yc;S?_;84V*Ns+Yq$N^-xuxQ%o>4Te`DQx+zt;1i?m+AqD&@$Z_oi`m5 zz1XK^(9@WEcf7RZ**gFW%z$Pk+0ifi*9vuVWWQ@w6Fb7ZU_v>HY@mytB;LDLIGKZl z?MVAf&q8Y`KO5WPfH_%9$?LlUX=&W$4aLCX5#w=%xgk!oNRv-hC-r=OUJZrK$z$w< zOkkSgvZzBfE8ME&r`U?O;noE1wtcscOk8pv!7JjiI*F>}9<|=&{^BIo5_++Jp${vL zT{ZS*IwDg_&ibT!2c#Uox@v3>m*jLwyXr6hqF-pZBsJftIRw3(92Yo~PZ&c&eLCU6 zr)eL3x>SU&;-eU6f`oJ_Oi0zXhbD~bJ~?VE+atl4IE5tV{qO1gkpqvD4Hh>!E zGm5I;W!{1Xo$XcFfBjb}nZ zX5eF1*Hew^;{ajf7Lr z!yZ1gDkjp7Aio zT{_Nizm!(HSY2bTa!GhPpC;6E9GO5ObjtRRYMhF7ucFC+1`s+9!IUPdoCHJG<%$EI z`4PmIZMIIJKgVLlE11KOu9b(3^jS~J>*G-c3=$68tcQlj-D4d+dAxcBYq`^4MN~ks zZc0ok6Ihz09X>4iQ1?`3{DfF5b0qy0Iv=`<-EwKP2_L(}jvu7XI`#~v6ctzq9=;~X z``c`niE2n{bn|<6%z|Vd7`$T;hL~RZ9vp>>Yagr4{KJy|KP0?VWk66%x&8}ug3gTu z>@c2zHwU3*RS>?Kn08^=5_b*H&3#lRQL6M_ zsNHAJ=vQn`^uFi&lfQR+H7;Ye<=(q0vq1K>0i3dVH(*z!rt#HZo(yjwyso>izbc{Q z`CEObu_A7z7VqeHOEoVDB`s#+T4dC;zMVDYR|H}m`Xp{>w7mKG`DX2 zFiUtHkrRfNBc&oDxn?}{1Gcew(B)#P?y%o4v%WAtc`~7DD;EKWF8(~wl;y!Z5fvP? z(NBahy+4>Uu_Qbg;_QoOeopn}X(;w-32;xAk8p-K1dMC@ZZ0NUF1~ST)`1bwN$iyW zt>C11NF#Zm1c$_|>7fc2>l_fsk9%k#MnH(ob1(PavYHHAMR_6C&?U?5J`3|X!%mEXfS6!2rH0<=`Mj6s{aXOgM<(OOY?2c4ov8X&cD5;; zCFrUG7>qy+lD<(71T-+@L)hATnAt`-M-+6p)q*iOBzqVu1F_qN8}CC(DKIX}bPXsx zX9}_cKM{SlHiWjx{m>W3a)9h9;T3vq(o9`xC~i14A1BLbjCNr;k9Jk;(LEd22fzs)AG`c1fEPp0ONU$iPK=WLzQw_T|w3(@rR&UsEZO- zx0r0+cDm(B>%9I{XMbaf-(<{0vs&fxcKrM1h5|wZva+9%NeqPN?cMBZgMBsQc6HO4 z*BjzN1GvUQeoZa;uuOQqfqusn&}Ou@*sod}Ncrqzv<`n)?~;Q*@p5l8G`QbOhKAyb zG-NPKz|?v-2-??*j&Hj(&N9njdg(ZRP*7E9rfu-S7E;Ajt>FDwm)^WQg<8EPB{R)e zBAn;i2?&|)-9=+Wf`81h4DbkVW@K3EMfc(sLyKck-x6dvy8~*P#~)A_>7vgdi(x2J zknlA)o;epD;57I4!@!#ODRpvakdK? 
z21L`I>t07^Iul$lkFUp3fNn2JD~rc<$~|g?R?IP%v*3}@Z`P#(TyC6S%+$e6z8%t1 z>?|WDt*AUNWbqSm)6!;eUV$bx@xxhrH3d_Fm{S=B`A>z6j+cfh7*oaRUlmwqjWh(HGvhebXS6Y~#P z7E;yPhF?f}&l%BVJm@%~ZHf0mmc#pO=tikFXjgHD1F#N(JxvqxXwpcR9@r{&XsrEP zRWzpwN>eazcs0;9EpCg%h4dNQG~sNMFv3mTyOJ|Ld9iGz@*6!Bmxf;m3T;+0dc{m&RoFQ7pKP<=HMNANT5u=@gxs#f_*=o!nH0G2F z9MkCf9m#SG7#e&B5Y?G5@9JHcmxQCyZoY734geE^>H+B9}jd1CyCs9xs|J6uueW_tRsOO~vlh zv$fpr9%-rIR=a(nY;Nvy)WaSKnEGilc}<#-N0MTN`Q^g$QLn_fy8K1#`eMapw*qKCaJ;Rx?_O`xw8h*$ zrMSn@<23%Nj@47)I>e;$;YelGc9=Q?xm|HP&u)`yf_+3E4$E`6&w0~XHi?M3VU9l!?2Uo7RAKR94*Ml%;>Gho6~$w#sk z+fLkmN=UM#(?%~T$e)6OZh;n>`jvIzZ8o4JVl`u(G@zh^r#vSN*36vfkZXiOr*8Y7 z-7Cc{F|LEHSom*_J<%ed{qfX=2hb@L3O@7zj0TNt_FCH+2pqY1l`~H``3L&X&&WvB zBp*=ze^i}CR2*#7q#GLd1a}hLCAc=h-GT=T?(PmD5Zv8@ySuvvn&9s4H15;i|IaLD z!H&bC59fBh_g2+2LL18#$0zT|yzUlHwW`>gn#ofIaogeMjh z0J_mfFCwhzrDl=IY})3|Okt$7*tojMW|68^$omfT!AFGUu99iG)oTcDGGW z3hPmL8uJeUH+7DGzgnoPCp`mxA=H>bYsKRIvRkvDc(UPM*unpV>Fd*N z7lGtUzzAq+5sfR*;nQKcJ-u7LFR*E&!7`!QF<@^ok{{tG$?|DAce|QDs_3pRYg$ry z^RmE2^|Z-u{NT_{4Vq}Tv6Sf&ptNCKQA!u}r;lR5%3E-V8%UVFD}|aS*Jnk+YFK4~ zVrQSi>HWs!gcXSoF0NbubCfD`WvPJOqiQVFiWhOVlwuDS=92PDmOz&Os_?M#{kEUp zWHvU2M5xq>p~iTan2C>lKK`3xiOG(AbD13AGb?`B%e;4aRnQ1D5_W&fKb;QR)e-ea zbe22a{PWcxQ@QSK5PFdgIl%Ynbnx%#{t3P7S-c2PSmo-@B571>ohUmP@9^0=+%}{> z5JW&*gdRBPxkf+28LeYrwRX>XA5khBt+%=spRG2nOdhWB=8J_u#U9Q;#_n+qpw7Ur z6rPJTo|lpuCh=!E1E>9nd^1`p4REIDyu-q$j4^tigG> zB{6VBcq|s^V=-W{8y{#f}L-Yip$;Ar989xk?Moy@g| z9(*8ZQMrl@#fA-vu@wDSLv)#OkmF(YX990A5`~p`fd?2@%3||2-ED_O(k^T2R@T*f z@JX-TdNOHUD!jGx#T|r>QJuRkudhfFF@UKo=ZDAS^$l>RrHNwJ@eD?Y6L32(aS}0h z<#L6to>@YJp@P@kwvZp~I~iq5>LcEH=J8mZ&KfL*`Zi+<%k!%F-=9#->Kdhv=b=ST z83yn84B4JKAawG($;2;{&hzsEf_eI^w*KJWK4j;UH>Qi!#| ztQ8Sy5Mb`I$gv9n)6>;BA=4+HNOr-`kT=m%-AJ{rmI1|CaKgURL09D4T}oJKooh~W z`5afu?VQ=`eiTx$$~gi7s}d*ES)6uZ3tJtvWtx_{cqXKZNqb=yjo=PE##j4*S=^ccF&;-A2J;35KOm@KIEZHfSFhXr>Qa zg}N}yGtLaOpyxf@qg|{T<^>EQ`ggWZAb;C=<@wY5{nRT=X|SkryB;T z@_=(aDoZy`&M>f%OSW$yZO2?f1tZ(|+GS%*x!}#NXuO_P8mtY_Lvh1xPH^L0*S!pk z45f9U#gPp%JE6KZ7Zwp88eG9bB3+eD66k3Y5$Y#yi`&%Ja~C2e`M)ro9y0lnUsiR# zyPmO!Ufz@Po=$oA_7gszvEnavzd(KqFhqO`^O3?G77=2PyoX`usaQ6YTQ{)7HBG{x z5xMMno!14?1?*Fm;>2X}-Zce`q-m!KyS>Vvf#dk2x~*blnLTpO61pis_z$U`V&Wg( zr7&Rq%gmv0X4;K)$tYs#+pRbZX7|J&=dZE{m2Cj=$tCy0aK?Qpo(xhBa`nbH}T_@?H+=B6r`1ilaE$fK+{w7tfix2dQ5ga>CKxH-+OBOm*B&%<{_LLsZw9n zJ+%TV!5#z@H2d;exTcrWYw#&F=%|rsU?L~1^@)0m^v!pSv$DF2(!-->HWU$)_>{Y} zvMOm!w$LNfSR!PQBaw*-DV4*VWenk2f^<30&xm|Im|azLl8ArA{_PJ_H+mR}C%CJo zuj6w*T6h7Ro{GkwN89`bSHsKPRThJ{fvWFa+ZP71DEcu34xI-BQB(6O}P^;o|?*~W9yLiejK8r5e^ANMm!Byzv&s&4ZE z<|Ea!)Jl`>^(*RFVyD!dtulvMHzHUV!JD3kL2Vytwcyd<{nnt%9JwQs0A0A{2;#QK zo*Eo?zTi^6jYm)L5z`U2cjp3u9ouHoTT4sJ;{96WeWy=ibqJYm{EdRmsRjxawDIQ& z2?jttIi5j(7OUZW%yUq{XZ6DPv+GueUr`%yP=8W5$-_ z*UKjv$;z#T?^0mw&@)UEztITWFzQ&BGwyOB)h9~l>-qTOt#Dj? 
zw5n7(R6w1i9u#Z7Tz|dq%cxA$6MK=Yp{`Tg*E^BMxl|NMbR@EI{th6$SoH64Bl+tH zzn=#|IjJN*48yqacNqJn|sjN^&JMU zheNhlI8~Wrbl=7L5c<89+E6F0nL8sWZxQI#IvqMQW)tq8kS}~GBp87P8tSRm*4D1# z{flc6$iTjp`%~U0piMqg8wc*AnNiZH6X5hCOTJ-`T%SbFas=c5;FMv zs4ElkY5!hb_zl-45A13Y{T%FzE|9YF?{tZtI>oJ-k+TZKP=ZrElPsD2*aI~+6n)-=s#ozZfao6NF zdsbp$onuwXUyE%q^DF5w3v#tu*L^PZWHJi8!tCp9F0W58K^>WbA)Z=BTu;1QET85j zBgyQ3(<5g~Z61Wlo=S}4|4dKu)*9!|9#7!Ik0+;Sc#vQhM`Ak&_Vsrf zT*oL_^DIHD0t!%GVhiPN`M8)ze_(Hu_}%rSzZ6&PhmDmDjfIbs+DKZa5vZKQf9upV zybV@O8p;nKVQqYV!bW%Ft?rrOG0{pcmvhJd9_4jmS1NF^-ny{j^Q}S^=N=%s^G~R| zP5JTBJZGgTYQ<)}=nR9?ec`?oak04kTVV!&_F4^e*`QlLikq8I=$2Igk=kmmKgPqj zwandIF5_oprf=r(e7kEMI!**~ay3Q%g<0WK&ZNQOV^IlBHz7U-MEmVI_l_*WC)pm8 zA=)m|#Uy=p${ZpsvA8E*G`QKc6&*1p^WQ94Y+r&_mR&rbt~7&j4PmsFu8m?$pp`{k_r2&DJgSTE z;jd7~;6=7?3`mBMH+93l*!WyZ2oCEt=KTRvw^Qc_s#;M?4aT#`Lp!p#S_+zLE;<;X3)^DAyUDBk`$ z^{hRSk%A+<&zozKu`~=cpfr(f3TD;1tOS|#@v+z)mZatzLt6f9yKoFL_>!NiUCCHC z+c&%ZD2XOnIP==Be=00oZeTZN(nI?fylibT{-8thLD=QL5AWp?ujNG4gl5iW5r&&Z zzhiOT`$9+#OL}yd$S^Ts6y*KKnh!6RDbNgt^)XH14ghPWc5fCP(j}><05Fl~4QLAJ zl>a5*I<0=GLxr+hoFmD7acI-7@#2;7sz=i5^>i`Wz$_+^Z?~*ldS~||dQ!6Z z?c4Vjv2P4x(~_;kF7!+?u^}D}aKFe41KKij2!{0wk{~}m!76pZbO(lK{2C8}KcWyR z`8qo{*Q!v9>DJT5qtoJ?zbf(mr|Oc-kan2|Z=whqPKpSfg_$kSNEy^K$;L~Y!xHbh z{qbxbI8w5nXQQnLJw|YWcB!E&QmPCQ_wd>1Zj{gYW8P0U+5*tZYoXbF7)=Cm3-&=grj$T_Swy%s*31qQOvo%>eh)I zXQfYS^K=fW#DV}P6>2+L*A0F4PB>U8T?Nx9u|!OiZ}6(n`P-CFF@FM+f*ur3d`x@_ zCApC(9!=GD-r`IlCr0EmQ(z34Mwx?KInJTQK4%qA!HlpfK(++8z)JyC$my!Ym1X9d zN_cMrZe(}HB&l6y{|GHiSZ0py=JoHpF=9jK{oFS*8@^d zM+xaJidPE?fy9^N@k<5%;$kZ;Jx(% zFW~u75b^F7b6)f!@wkyjODf>CsyOm4Z8cVq(9(WPh!dL8#tI(`)LWRFe_TEO(`5VE znY{OKVrH~H;0!Q>Nl%iPov}c8y6M^Kp?#;emLN1b-s;Rh!k`2l=4<$&@24^y&*$x3 z$>R!9r|K}mb)EjR(FiF0^Dne**1_sg48v zB$>>h8KJHm=tOcRk;D+)J(-)(hE22Zs)67#e|(#50AauxaN5cJ)s^ z!(R=v^!Zwr?wj~{ml+a;RtvjlVWpDkK z0)hQ%9_I{gfoReOa#i{0Hdw>~?`G4cG?>c~R)14)TzqDm!M75CNm9;w{WgWlHji)M zXXL+N-jN^h4HL=Dl6DgrD`BHmvcq#_>#U_K=En}TC>Nk0K=DNnb0uak&}odwD16R{Y!G`F%ib!6N5l5XN8=$(jG6t19L2sHtQ%P(vvNCH-zHxUlGVq<4fW@7^Igo)plfQqP>cZa zR(6?urV&l=z`Df*8Di{~i|rtO(dAK~<>NOj?!6K7#T3C+R^29SHc7GvQu4(&YYNCD z$@7sCS!0gymxUSpfe*70&xw8%U3asrNwi0_f;OXH(1W&)qq7^Vx~EcDh7+uQaVAb=%7>ihlnFZ8Qz_y}+K|&tIwoA>J5%$J=7f4YW4g2*KP{hLQl729*ZpU!A}LW_ zdwq*zg;IU@mzuel#P0sDmux@(qe3k4x2B?~j*R_HHbXk&S?}3jQ+5EYZNdmx)%9!R zSG@}pfUdqh+H=~v+E9B3_6@ZAvjq7%i?}u7m=TN`)Rosn;Htac&3z2EkPA@E5Ub@u zIgs?n1nmZOd<~(U(&{;UkTJGd-K}*Kwf#s@$FB~~Xg08VKa8>4A7KPAxd9caHzGU9 zr`$XJHCJwS$X^$21NDx^ErV+jZ8^^! zuYA+L@P;uV@gE|ly({~zE3U~st{t4=?)|-gAdz~(_`+yAu~U>CE9Db=F70!#cdxyD z%`c{x=SyQ7p3&6RDmM3EZ*WMFFa~RCa|}2WbS6wJTp5jC1o~4-J=HKf3m$(sDo~)Q zUQDk^A36gw>Te6s@;=yyC-{Xzti?78x&rv6-)C1pU^W%J)NaJ6|C+_m@h-z`Q+XHV z&6XS#^GAdOzf6=`@8N3^Bc<_!mf-dl1m7<2{k5RO?A<8hvN+iqW*K`~bf{R=UoNK? 
z%BL3Q=kV{43cn|jH`EL3dW4zCvS3XY=rgt%Gg~yBs1JAux(qVEXpgzD!YK7i2J-%t zTbXNkg)}K{O^N&i|I_sGYxJij$(ESQ;lx5fG_YvC95{H5SeD)o{Y%K3*>4oFKRHv* zdHv#f3mgz5mtDEAy0v5Kgu1$pQJrmNAB7ZmzFyRGkkx&LF| zm3K39KJ6~~lseKeFvHl+v9Vc$doBGGctQ&Jgz2xzFQW1k6u@4xp!%XJKhslBnkZpwbUxbT0UJ|MZlCi!SgGVl=M^4KBciK#c<3~;?p zO3|-RG4Pth6Yd((_qmej-m2a^IKA5o;EwAo3lB%>ZcS)h9VYYsOXmOLh*1fBra}>5S#EG| zWd7nmMEXU#rF+66@?HtsxAGrs5)K_BZ8$t1{Ktj2nlvup=JC3*=*GoHO7WvS6bois z0_OX1yPrDN8b}h2;5#Dzfc`?VUwlm%BG^6X5$UKuf%Qr0Ejr&G86x?x8}*R76DLUm z^%@5jD}1&Q zBjevg^yxa&?RdxMLe?Mkse~fUC$tN$H%P9_t<2RrWY5<#q2`cW1slPwGrNp!8~C$- zeY#NT^R-mp=0!oMhe}-3&wMt{4YTUf*6W5t@029G4a(D4xGx zCs*+BzP0R1X8%>*eq34xnq~m2!cn_x_$c6d)9X&l>#)*q^e+CY9nd_L%S>$anR!8Y z+&7q(eM5@yCq#UDI4V0Mn_7LH-jhq#)Ukp$HPz73Hl?1MyVJvollgK2r@8xxFSX8Z z@q*jpyeKA22b}*>L%$~`j0_y6i{io?(zC5jX5N#3s|9OEQ4>?Swa6GBDYV}cD0AGh z*4vY3@cOg_kDPH<*_Cq`F^#nET||!kgrJ=sU+k*4$I~2FGSWBy6ONq!62x&Qek4+G z5j5ns8*@&w18Z5FCGyD?~v835}H#7+i~Px#jWM zgnf?JgnUh%6*?w1il-gJyRcOArLk)co_5OdqqUtN1!I$)_3f3P!FKNizjzC3o)GVe zF8R3LYhRRR7c>9SqYz3wSU4@eWw;wd27>j!UOS{0v%Yk?YX7(V(q7$?rSj=4*tvr4 zvhFVXF?E0Z*lzs&_Ma}m^DA~{gSjV_ul|J9WsAl5veE^-bSw{ zV%g9wEUB?c6g0((H3wv7ZCGZ&P4NEF!Y8!ei*P%#(b(Z!L zGz#F7r^?IY*OtFHSFJnS%e80KGpeT9WV9WR0`0=Q&XAx8t$0j^%a*MsWPj2o#9I>@ zL%adCDv+K{RCTsOJm<9^rNb^NJ6viNpB_65>(-vo9w0q`K99#qArt0g;+T)Z#;K`p zlggU0b>D*mlae)cFS6HMuD`lR#9c7oP8jBaserj9qOxh!rd(t1ADEYOu#tRO;;BOZ zl^P42Fc1$6-ZgNBthJlpk097%dmM&UfkZxECSSnx)&+^K5Db z80K1T9_h;xE3?$xH@k8q^{e;5CmL8vRN|$boSD2@D%`Q2)U*0dOOmy@99htpVOowl zS|t78`0|?BlR9g03H%MtDz21uxl^^k3|_qoC99x9szLp7 z1L0EDEC!}B2P98_#lRF5=*?nWTB#{gSSGmuGF*Hl`ygC;)pgk?jW&P@Rl!5f@E1~dRZnwH0F>+A73pgh9HTW;>&G1+hH)~Uecoe9A?1Yx4{qiGyNxQ zxmsCORfw^|{`$&!5HXLnHa7?dBXhO-itq9`vLOi|?fw^s>_vj19-O@^mJ7gRe)a>a zve-^*gq?rx$(J$I@F}06JO#w12z~oJ@XQCGHm`$Jj>|rF=xC z0_hcXJ(8`i*qbBn+L3vaM#eVx{(iRwuP474V=Ts7KuKZRB5X_D}E)=KPQi?1+4a z=Z#C2Zgq@6L+$-g5_uq$LU+B4qf`AC7HVO^}ZgyN0^NV&<^wReHz$^n2~zWxo6{Xv-|Df98J}T9rb({b!xs zZpVDcK@dN%h|+n#=+X(x8Vl3Sx!sznuf4B6@zZ;E+)n#|gdVNkvEOEp6Y8{=P&)$n z*SjPW^dV@ZV1bV30MHKAX(7$k;K7=mC%FQCq$`;jnzC~k`8gqvF+E^)J||F=;(Au4 zTXW;>ETucvfj#$fJZ2@J>i!pVm5uCfNc~;HHH#QyyI?9OHQq#JEzgHZ={cBEv2RmZ zN)Ns4b&e=B=cBy$H z?>%omw~W!?fLJB6>gJ%AufEm2s?RjKvO0B!yLYN3_6I08QYX>;f92AlVIPYiU*!`K zzta{wyNEiv(v4y|Qp+Ptj3<#wEomDatmR>S**G+m6T5uWXMdUdl z^TwKe#HJhE>Ei4MjMoEdQs{QvWf8KA9ly=2ay?K5d&6%zIr3zDsM7^c;14xn@T5=z z=uTQ%TU)!LSEi;mt^QQ(%peDmQ%PR_CWARxmx)$B)zJB1 zl8eHi$An_JDB5qwjKYa+zbFS_!ph2ePy8`hm&4LL%@{E3bZrhTi)UhHRk=@LFczbJGyycZW=d(C|OSW&!9?PsV;awIf?zAtOMc7@Fl zkuQAr$yUdyF@6k1AkPs5aKJoY*q95~&VaL_t{Q8j^xAKkE0+g5MdU$_o-N0)AJ2+v zB^!oRt~$MRe|)cSU8r4uOyIjQF2j>QFn--!w1@LkB5k(xE$*IwFR%l4 zTNM*VwB1_pHg`y_*o5-f#?v(_>8NMU9VV{!+HtPZ_!iwQ@6UPyo0jKE^_Au(Bs_vy zY6moAQCn6%+g^X*B3C#T>H}eo?r^N7MXD3oCqP7yzG#;D%DcW>2QliaKLjrH5r4iu*@7eym~#aEYCv)UipXor|=xuXI?{l^|S&xTiwnN=l1o zG7y!w=b@?$e>T6tqxK_YfOG)-<)^>=g-xejQGuWGueCODZOt_#?JBYFxDXh?Uw#$u zB-X7~(!^*C+m%vZOF3l&N6>Kx_%_{HV}7o*isZg2r2CQg)QWVN+i^@S{+QR&bV;NJ z6=pg)GlkSDn&j3k5JH6(V0|hllppmulPc;6+jJv$-LD_7Pji~+Z5qPtun;uDsVrZi zqW$o&6LXdyBMd>N#nz<>TUuJetNbPOKdX7@UE?=im<@-oQBqRUdY9uZ@foq-;nbh? 
zp;`-qwq)}_fe+#clKh3}G877cflfpW>+K2Gdwi&`H8a=CF-C0o-&(ttSvTj&2J76% zZBCWwlfqF$lejp)rT%I6K+SRZ6TBo1T}$o8{-TsZqLbLvKG?n}+`q^_1uQ@Yp-Oy> zVP73K?Vv6*uyn2Qvb+!juiJIg>v^d8eRFF)jdmG8V>UCuErVZ?vFr71_-+!hyizm9 zj@v`|SAW&ZDqV~7?ebZZ<%)25<;;)%IPCd)Da}W_6v&qUF&)SH#`1br0w@2|D{d4S-2;g9r*DHR>#9;jg0vZSbR<>DWlSe^nR{K)4fmT z1zuScwpR>lIE0UxPr>L;*Wr4m>2>Mco&}5wYHoW;jXJS>7iCKYwR!>yO>I(3xqpQb zY%mH)Suw*RKVPoDhT2ZFonb_%Nz%EiN%m|8w9p3))g#{58;Ly$L{3sQOJ>|7I!9D$ z^*lN1Nnv)7z{J6)TMoh5%rA-V(#`dXCtX43SuP8QXn~@CgIR`ejY(zsrx-H!L!BoN zmg?MDjPguHqz;iPrwbZTp8JJ`<_3%EQht-~qtmW9{Vg#;n#(rK;f}F)D=eq!$4Glm z&60>YhUb?cju3+)biB@l1J+hqz6hFtk=k&?62HqHv~i-vVWqb`8d4h@e$FDnS)NP{ z0$fQwxW82;f_XBg;)U&U2N-qU_#zWZttxl$UU(T$(Z$nA-+WOIJ)ZbB*(iL{!xYcv zgxZl~G1sr=6F1mc|G=N)K`jZ8HPz|LFFBFwd|@ynUSBC}vjDrQ6WR=8jPQ_SGq#Px z2$Cry?4RhOn>z)&;-YObL9S|WA83!2^EYWHECmHaeuvuy=WZ<@>o5v3VM()bT%N%X zVhaX4`5mzEM8R;47^MXFYv@BP^i{o+r{qk2= zz&f}BYO5oTO)7Oj;9XtSFB>*j%xVhfn5Yeh=fF*ACAy-lho2YZ*s~6i@mOV1)>pM7 zp0{fngD?<7`_-Zdgl4bh9@NuO@ljL}tjx}c7Ku!a_W$~5O7K>wR0?-l_~sr7|@bS7MgiRB}<4378BdW4b8jK zCXJ@f?0flKURo9fSg&KWRs;+L43jp>Eb>rJa3J2+ugj`Qy`DwxE1YOlHU~dDGt;n0 zb~WFU$J)Ijn6!L!R?Dwx_0~othZ*ImSTccTSn5tcEOGmoG*P(NLYFGP;$8pKBww)} zq5SEJojeR^j|7$ltF0B#woWO;9;0}jx8X8DU=*rI(0riRqR*#cA1Gfm=Ang3E6L2d z9X?R|nw{7$G)EBcVhY;)jvc&sQ50eW{DUbu;C9a{u=c;&B6!d-fiHl9qvpCN5a>&e zvxyHjZ+Q2yhi2QMwP+X{ngF~#PeBK}4nt&&&{*!i0(A3kqv-+}sEfJAx(M_5W+;mI ztRGW^b3hdkqNb*13)vkt@Z>(shqjoS{0F&UYi&#>whqkk#0>C-xs@S*1lAx>Nm?*W zOKlYQvuF9n;QyhA3=90E?fe_~=%=lnCf1*bdZ8QZPho)pxFk7(qoT25}UfYE?>Mw2 ziT%w1o~I&S2Ime?4R^ayh|*jj>&(AD_9XC%Y6P=$cmBb4^tcH-zpo=YE0+ z8-Sp(K;HQG@>3LuNd~**(j^P483E`MlEI1HI=Jj9UEJR_%_B$vj0fYrvO1)%=^}Z*puWR@t?z&*7(A>=?h3e2vu=(}H|zS+`W| zA4o!N0U*{x@5e#vZ1=v(xpdU<#*zov&-aJ(>fRA&f=YszFKA&;vzbeG9dY%2gW;Z3qI zG#DmewIIjEyui|5*AY{h7c{yX2Xa+-oRv?jD}{ay2zUx@cjmJIE7W2<%}aPl zNC0hBIO4wFn)3W$H-YyMVO;Ce(KoKKg;|Al9>^Eo;Je(I z;wD!AR8C+{!rF<1B-a;tZOtgK9SV?vOcrEf*Dz{SIEXmyN)uNrhK@SO%geX4IJMBP<#mLtTGF-q~g%%r)3|a`hTr;{QLnLLjl{{@x3#DOOo2@JA+K z*0Qh;2gd8Y3K?9iLm^x)bBr5GDqxkzW7)@+A!yWsXR`9NiQTASIHy|Le4Y$(P?0Ib zvns4938c3g%lv4z9SVWKqesIyx8e!vg*TO80ttZA2TfI&9e0jud87 zaZc)y5&M$iOna$u!ma=NOW^BK<6sbv_hQKhU}g8F(KnYsF85R6L#xr&$3l#aJDDiD z6WC@5T6covY=Xi2iIZ-(XAJsj4g;t)1~Ao?T&SQ8LE%L8=+w*Qz_QA>@s?-;B1Okf zf7{gZ0o<86O{rfx=5fbKp_3mPJs!!}vXK8}R!ze}&Z}(r5%Sy1R$Nr`Zk~o8J7@$> zytt`R#YeR(i((d^NwUBqP&VZ40=SDl&AxoPFt?0{mriQvnAI($j+M*6ELF*#r>bIQ9a!9XpKaNK#7P zPM1G>(YEi|Bm(&!zsWE0J(ge$L#|4zmWA^9hWQ3z zMo5Z@C7w_#ByJz_hBm2#Qc&Se$tD>c*p9Dg4^}dV&HO73t1PmeMU4;g`X!(Qk6{f} zEvtkg1vWdD!z%PWi?us7o$tq3tg(>V{9z%VeT2nrxe4^e3ZT|GL%RtBIxrNiPLf{! zVyV@@&^-iXZi~Ta&@;b?*Zvs@54TRI>r>VWFv`^sG+UJ><}}H*!S+mUMdTsFA!BI( zdSmhh$akgjS1-3E~Qb@;cE{U;IRqHqc|x#S zQZ+rj)H|}6e4*~N&m4f$KVqS)z?Hm9)Bsc@C`<2e+@{e1~<6Gn+dFc@i8*S^!#6_ylg zzag}HVtO>j1a?E)Wxb&t9)fHCQ7E(s8-Gk=97Z5o(C9;c)_t)wTZ_DcHRQPXa7R_8 zk}tWG%5F@Ox6cQ4R$g{=2ns-XRk)}ay~2RMiv%$f0dFdiL)?IY|GMj{P(K^Xo&Hrw zTy2qCR*p9FC&VIB>R3$UT%U-3k|A^+3F24c``4v%uv899fcgJR6AY6`$T>1;;!@?C z`$Df+*F|48QY@t*#U=yzkF2+>;pgMN9uK|Q`^bsOrr%aTJ=??sxJwvb*oO~fvUP#h z#ln~^DUZb7`~hWEhGmXlHGb^}`OfH}bmr^Z-C^vco##;5DRq1|#<4M}?CeYIOB~+a-V+sd z8$UBjqi=kCHGQ2Qb{JaXDEp#S6gD6!W{3&WlX@`gzgfdRbr2@1 zom?v19<`qlke5W1l55q$!@A(MPvWB7;k`f?Dk2gWrOjw~5#b|GDx~8V$w)!|ZMA0? zP`6lAZ)jk1zQcaIMW>4W4*%+{58`pSM7q5mK<`1P>J)je@ySlni=K^pWSL?E(O61+ z@O%h>nb*Yn62R=aR9H7ORB{U4AO@&%zr!*Bt05$Ni8Rl!QTH;lpKJp=v$(*+D&2q~ zmCB)Ce4(g5?-+-vUSX94KnTeZl%SS(Cog=Z1kSpy6g3I3GraJb(1WNiI6z%|w$8=! 
zS2>U1A5kak`j2rJ`Kkv?7idGxC1%cFsGh_1@f)*)q7rsE$&#a<3X59%E?@80i|hV6 zx2y7mGnV`41ziSyPuSV{?5cRntZ4f2Z;IQ=CQ(yofqRIMJ7_t&ta&l%KFpI|ncYFS zth+wQFR7yr|iAGmKc(V6lTg|1cLA9-5WLU5BHq>oHQS}GE zc=EZQ2if9-!k;)~I|8bO+N3-^mc*VcYen+BB1rGg!Od-yw7wj;GmOjGI=ua6LL9wD zKR_xy|Ma|_&w4~{DaaX|N)nNw%;qZCoM){zo1ve3!hxlHPb3zEyaXXE({B+6_`Gyv zX(a1Dy_r!+xi$f_?!sDo$N%sfV4#nIE=nllD9IF{!M$;fkfO=UdERPA-4X{ zV2-xLDXDo^_iqhHT))$=Sc!mdO-R0pfZZl{a8P_)QitH*K$u|OvbQ|9iN&S}HBx81a>~xon`O8`f)l;w1(@&j(Zm_9#bc|(@~-dt z`gIL-MC58c{q4IB_1>9llgnO_^m>8UX;0U&Ft*QXyq$Mhb_j*j zRFF^i`!5Q}z4b;i5kEtCxkiO6yqF<#RUNsAWknSJu1m9hP}3|2`V6TK;J<)~7N$-e zyzoDg&Ag$bPz;hf%nd!WP$x)5@z8otk?-`(E+5?Q>8lh~xn$TxfH7#5vawB7JrtkZ z*C$$t2bt-5nsc%4{8TT>PFv+@JNdM2-2Lc6ueT_U~CphSaK;g^N&H7V^`9=18m6$+i zq3TQnVcNgpv2a=2t9H4{k{WT@!7MmJ_vXmAqvZAKm{SyjJLB@X4%EGU*$$f(ji%Jn zmSH8bd^SqQ1kHe}C{@}aGAT}4!z>aI5$+);z1mLd`yO32G>!GNrM(UK>i6x^mB+Tp zPQQB)TIWb>YHI5;)^$ymV||S`;VRv6^y^xMw|ABJmd3b~`3ly)@3+X@yE|V8Yt%sh)gn$aFq>EJ~amm8e zJAx6sJ)=Z#Vj`kd;47?G-VZb;G;z4F2nXZ<$0VH7fS??YZDcuVX@{EoND%#WWYCwf z#uB)Ej7Y?qL1&N92M;@Nwose)`KMOSDzT~H z=|XOXFpq+O)eqXiB!1!_JWjegn||!jXqqd`xCd%IPc6u--F&08>W;#YJI4c|ucj8d zh`qaX0l^Uv6t3fDa4(0W6T|aLN(7Am^w4aE=ZCYgRo-Z$)pRzDVpi5fm?GtI zDN*>a8_~z|`oa@9g7J&Nz#Dfc-qjlZ3XeY%#fFc!z8hU-9ZhRht*9Td7)(mh)+gZ-ZxoZ-Zt1L0lceqP)>+5vQR+ zq6;a4CLhcf+JT#tqc%K8RR^0&2xqi@^H`2_pB2^gWU593Di3%5tmhVKt)FcM&4h%SzFm`Ru>YMcRkEPrAqf=>WiVB2t5t(>6&z?f<(5* zZV)$VoeYr4`c261yYXiXm`lZPs5tBS9DAUzb}F^aT+n+S`e`|FAQzs5ZHCu(;QyiN zEW@Jg!nHj!ICM%kf=WmuU4xW>)X)vmokKS&At@z|beD8D14u}Bhs4mG-@NG zM`^DGp7e8RA#_UO+P|vq^9veT&KeKxwi!mEZGA`9zvmy!%-38}Qk}$TwS7_*2)wE# z%ma>RdMhlBE8e2o6c|9hrtele{?qLMAL-(!3^K)=Rh*eXhh8U4_J4Yr@^4}a{Y;&*J3ss67O=DKT0V^PeNC^4gvEJ@zaWF*7K zDjq1I6|k^qZ-ZgKG)vpdygc*WX@IiAtE8yCTi3UXODQ1dMQMmC-JjVN{esCHQnptW z$vF6CBF;-HK@|;XQlpwr-1qqcwHQ<38%IC3DZv2_|AmQ#dl&iHx?X~Va*V&{xPGk| zY(qA$MzBJ5X@LD{^+zz+Jb`3>l`xYeovZTqn1j7UO;>W@2oA0qElF%t4!e5_pANq; zdaM#bXKFgpK}l~Q95C#Yr}Srw8qk!b9<7$wPf5Bs%($H1{Tk6Z0={cdc`iWDe%e#s z@upcA&cqYFSW`42Q%cGyi=4QaAf~h*jKIOQzgAQra!XsIvukjG4$jDMGsZBR8XG70 zGz?xVDxPqIuHLM6HY!SN!IDmuzOO7^ zY%HGLeAS6^Z5?261KF4zFZp{93fB`tA3fdL-anB4{0jxkKnN(4-P(G3-sn1&hryx^Yp!M7q z={&luIb@dDRO%T2al)GXXYy-lmAg=s zSQ5W7i(R~2c&*StTWo}jd}NP<*d~#;#1i)~+b38rXqOUJs?cK%ECkm(FOvZKW%U!u zG}e!T^;=wrL;Ks($3+;VlL-gh5-lXE5pyLtQX`2fgUFetwXb&%@arLblM&21i(x2g ztjF)*%1Lg%dg1TKCgYkxBeZnWM;hQNopc5@Fk;?bH*GH8R_|dxc=L~>sOdTTDPp&g zdRm|O%}P0C0~Tavx;6w&7aGA=OQa-hZv`U#^+P|8izDzoW>+C>&i$~Liv~9fWpW3L z1-WMAcH-fKObasbEJG)uFK*poU#)P@bNC)&cYt`-l4aUs<$860DQdeC*a76zyM|n% z)VdZ_W6u`a}<^6pC91&jQMYymX-GxCXp=GdGBJ1^W{H5 zk0Py}#SmF6n!?DZis!^+P2DEQo^B$FdEC!*D;~31?L@pQCK4jWJFI*n(SX0Xxb(T* ze^3`hh48{|MhT&nNQ$1CpjWDn09A!O;(X960A_`_Y{=l7MiS5%XmX$P`R&QCv_Q;2L)>wchZ*!Az-%z)2#GeQ zx^X967mX(XBNJEw#+0EZH#ec%b2HFW{0SI2ECgN9pGY+Z_ArT}+4Mq2!yxtxwSyXQ za6uv*S0GD`_o@oY~w%v%OfIl z?d^ICs;}5s-!*ifaSv+*#A@l&8{^Sw^ZUzOS1F}RP83drX8nDoo+-IZbc64CTfK+3 z=(^7j)J~xUzRP{Ue4(vxxbC$6?^Ou61-CxapatXe;Z~?Adnt$HZIWf_el)tVAAEg? 
zdB61ZD+mT8Q!Qar)$jcNC8WlhpIROhpmpWwf{7(Um7t><1Ww8*Yv+-uIy1yQ+S(PI z(qn-%bQlk<3wM@hSKe!;4xe0byL-L5K4bgl`Z;BT>N>p4+J4%^{5FRg-aX;D3 zuheKRth$iTcu@*(_c;H)@Y+Fx{3@EQnEJV{?H6La6c%h#)6d=cBQS;PgV3_)s8YI3 zRnHJ*_G^|phTo|3DiWo*cPx)cFP4tqV>3e7mkppotn#A@ptqL>G&P+bLy0~5Be>Sd zj9C$w0NH{hhVReLtM%nRcpq)N(TWEo{Xv zTjy=MT>OjKx6K9sN%0?udUStOn>Y57XdfSJ*64~6?lVu;l%bN!Hc|OG^+2(3P_`W(`f|>ynd{$J=_N)G( z_h`jSgC6>oD^P}*rHvqN=kjcmAsX;OPrmup$sE_!$8_LR#GrJf22${Rv?l3!{y<}hRZ ztNI6)bDOX7Et68%I5# ze=Lv={ZiSZN60FrUwFWdwwNo@M_pTIf7vdRG}L}- z45Ym8Mw=3EXRKi!nPw)Kp0!zI{O7XVZ-6T(e%q*oBmb&iw)XAsg+kk5~V)Och=N46X>gwJ>vX|3L6;x;X zskX0fzc}ZrJX${=tQh2?%7aUV1=jcY;(bQxSYGv`#zvyw&miCHB8)XCCnWE|XO1{d zb#b|RHDJ&U8`M*44LM*HQP-fY>u7E3HaQVv7Rq4rG>+`lIs5Y&EUzyN zw(Pj-=N4u?q5em5pQ9aB!`E-z<9Kb@H3XVh4Z{VP0j#oJ93?N8Ry!VAj5dG!R4UnW ztkV5#fu?PVyS!Pug}P^onznM}Ev3@eR2)ME0ym3Xa~(3~Ii~H$KEK@l65*BtIryA- zS;D(`$S{AsF2{gqD%@p>Duy=fZ5@LFHzdU_gdCXDTh3j%$co_Urv(Y&!vKlzQoJz6 zRwZ^VLviR&Wj5d5O!XxuDP%)@)$`sM)H;5`?Q?5I%u6a!=LeNEPK%;hOJkYCz{hJ^ zRwv;@6~9tarTu5~^-!~JXm8Cd5#K&>c2-SMk8?#t?C_{re5&UP_Bx(@iz&t}%(cqYlCvm= zD<5=&;9qIT>FGnV5XmS-9rE(!)Fo&oW_xM>6m|Ax8F6caL-fu~ffO=ZHNo<4@s$q*c3dolSqDm=uP8 z&oiDo>Dm9(GM|>AgilaY*FeM}fuz1f3hbzp3r+Hs_ij?g)y%=McHK^pWGG$|w_6ep z+0@t_h5&m~c~IyzVl+O1uP!?s(I!P<(dAtCnIHwTS}TGd@t%}KUF2mc>jLbW7~ zLDu1W>!KxNQU0yYC&7~uYYo`g=SLV8Il%>B#}|g7YpmZwwR_Pxd7Ri>A>C*qfFr-_ zWsgTMW1XpBMkVA(h18zDnr!4MA%TKVh2=AK3!Q(%(k_lyS4HJ5I~xwm+n??Zp3ae| zGW*}KH~c5;PVd*4`FT-ed9Ce;ASh+u_j$%$w3tB~z>xW2hz1$2(-qRt0n7@@I0fo| z67dy(B{3dOw5om3Q=KH+3}2*lY+c9ab$;{q?ZRBOsVE-61c=|_|LCnn$2j7-B8&tN zZo-8a_@De7`uh5eP~teyWk3aeJA%PpRj-EHbHRUN**CV^a8?aCD4y{y!$fiHh73i_ z74E++_gsbt%rQR{Z|fpeKs|?fzuAN=VtwrQ>5Lzzfa;gdOw&p);orM$CdJlrIwi9u zr>tiGjgO?-FCzXHs~s}B?leQ+Mf7Ui`KACgi6VfvfkwMZaVHZA#qE~ge&R>BKZM~% z9f_wOpKt(p?!UOcRi5~*_QLgu;6=+yw6)^Tc7iwBYaIm8fj5d@dY)L;0X9d&!@N_f zs@C9%3?A#6o>+yMibUOG5;S)~R_r`M+wKy$0mQr0>Mm{cjQ*Y?DT12UvD9gz;w;7v-zl@shGL*>3i?LJz`J3)> z`+PLTJj}`$!_y|rH?>3SeG0nk>nol06#N~7$|`0MTKMus7gQ$5Z+^41sM!79K|Pc2 zaL&3171s$)bkp=GX%F_IY0&Bm@ntnPS5F7J4IXp$&8t?~;gehf8?R9EX%_+6YW}PI zVM|5Mge+}=cT>_6G1P%w#i5=n--%0}8DVpjvsy6At|{w^{Yb^2MBucR{d|sFz|I%| zoNN4v9A7hJf5OHO7u{EawOhKFL77sxpJv)8NSA+Gp*3PWu`&iDwwxfa)wjp{HmXFj z$8M#zc*clNYL5(T0x?_LCG;KxxMsz9Urc&C3VZDTyUzl^TWK#TNNLr50=aJ_iV0Cr zbG@TKymu0Y1vxXZgZ2%bH4Zy$mfAf>bOXeT8Fco@aT{`#u)`vUcs=&XO4-vsnu6?vcfL7&3Amq6z<`B1m#r)cP4-{ zNJcJIySite5T~MgI9s}S-{*~&>R1+OpY~!!=)()+ro&_K9n`i8UT5zS1b`ZudO?;v6j0NV0i{~(G zihj~vX`=ONh#<#&d|f(ZkzOrew()O&wPz@OmlsZC|3L`~38(ey$uHY^l14&_Eg}2; zr@b-S1*261Yf*4ZPFSQ1E1h|5_SzMKW<6GW17C`si?S57nH511&D52X*Y1KBG{b93W>BqyZ(_uXf&K z1uNLQdXq5N)AAEF!bbHIIta~pXmD&9dhK_x9K*`rA+_JBiu+wD>#YX+tsk1(*N9F{ z$o%sCnSRF2*WKOu1Pq2Uzx(=%57b{mKk#lhS}f!G$f|H=iL)`mhI$Eb-E(#Qcs%Yt z<(A$Z5d)$ydCY%1wJW)ciw<;YCL}T+baGWUu{IiS@3U1j`#khA7hhz;=E!(==6+hC zxQvawtL__~StKb&u*dFSJ_xF*w>#s4QP?+qf8=2>m%xu%4a;j*H^yD{m<7c}q~k|) z2p`=%b4kM7hiH)ahhI~H!_@j&!nVOvzw!mx@F1OEsJ3m08iE2$FFojvqNx{}7x>}> zDp)MKhs`pY8w@#pr4uV6+mC4y^*qL|mr@8jh{Gb6ui)Y(7Y`3jVDHc$A|pW~VR#zO zYlnRbv-9_7@dJ3S##H!CX=@+Ep1*fq)KPl)WS~rC7FoVkCc8>J>$lK1j|z6F+h-fU zlxL=m-w}ioD=S1d{9IYK&q_#qzS#dfzZpi$4Jj7#sZx5Bk85tqb*R+om;|h<7QB5x zdLbS1k9V(;F?RpMg$c=R>ef8zhOsH7YP$Z$Bdtq1A0pB;V`I5>AzNc1{pH(Ra7u$CUoJPKgS%pFbc z;Ky^PZB$G{JLlS2Z$EFHxUB*GD{5Vh;e7QkScmWPA-Yx`29ZbgnqV;(|94)OF0i^{Nm-qJq$uGa6T^rl5&YKfC(N=!46c5tgq&k;x)KJev`X?P<+l zo@#k?*sVTtwp?$%n!5`eU$M~dQvdPjY>dbK#_oH^h1Re)me$=lx7WHeFaU!&Xh=tm zMNQmaRyXzvJf}24jkS#Thm$L{qaph-FP>+ziW<6tW6^NP*Ti_UAB8TPWDQM}LIzD1x(cL@o05M$Js$a!Q3 zEd>6@)j2QR4Mq5WNIza4fntGW$aZ#T(rl2FOy5r@tQV2^B$mGsgc} zeR1+{5NA$uelG&;Gj{5=#F{oBj4b&~#~^~=9%zvRuTorX&`K}tn!fb7ABwL<)FNSG 
zBblbwa^W-5v2P>t9c@^vkdn_Oe&d`T9cx6@WdFZ$hxl94;um3MaV%#9K?#zrYKHJC!8rY=83nEs0?CG-0~x{bmLbxpK<}hcVq4&(Y06jd`&7tbjUo zzGPltd2Xngo)++@BK56EzSosAGjXMAK$|?>mQ}desKyg2OPTg%CDYXky3|`*NDPz6 z@k`c0ntxQlxy?`;aRr|Mra9DThW?A|qGgb1GM1D>>6i~jNqTuZ4HK`56zYOOZf$nE zh_W?#{`!Iud@^aqNG8=*S{juB;OM(4w|kUbpD+ zar-X42Wjq!x7%qv<-5>BxT~0k{TbGvN>UAv>c#Fo9;)#johuVyVo)42 z8WqeXX^0x1Ty|yUueNClk}J6Ar{bRgI1cZT{TQwrit?Tvo2uk5dLeJR=;nIGT;t$3 z>qx9dk)q0}QbI!gAPRhPnCVK>J9!kkGd0&8fi*=xj2&xUiXN1X;|Ym&oAO}**H~s= zn`Y1>waO>WH2hwqqn=5fgYZS0_wYX!Kcsv3kBFPL_N!2L4F-!mi&q&X|+#iDS4l zV{l<|^gktS$Yq8E9ry z6k$}5F6-dm&)Ztc3+i2cwKz=pXcdOW0yJcoQ4L!A3705Wd365<3BL*BFSjj@FoKH= z9G7Ya>Je~fZIZ86>Lo!DK3GDGqsBGFz4wERS#ssK*@z{!M54quoP0JdfHJvoF6g1C zgI91FFi!Q4|9X>Gu-k4^j%#5wqB4X2jmV+%2W#y*i_-lG2ZO}iaHUOf8o%X_N3*}5 z4yLL@W=D|QaB`HI?9;(S<;T~8X=GEXPq+!b(gEUyBkogK7%i$0l>O-<;OuPHUzw%~ zGN^%Gt#NbUhTJMhYV@+zG-QA_!=~f5kACy9X?GxfBI;9Y5L$GWoyucTL&M6w_S&;- zaQ$xt^LCWC0=b6z%)CN?_bhR-sj0Viy07@O$G*rdpcZ#2c_@v8D8r_9`oK-sfG_Q} zmDfh2$&9Qsqdc*o*5yHB2P^C!?bqEwC|rp+FFlot^o1+JXoZ<5fx3u5?N5+k=ZB|Gq1s+L418$PFvc$FqY}8WgAKCa^O}Ztqmiqpy(`sov(UX|z7&npU z^94kSPuC?bm2{C^8cKBG(%3SVNpu145!nN~;XAe` z{wFbq3GFmK*Ai)4+{O3UJCsEjD2%`hZgBv*OgUNKHb?^A1PaAF63>guRk%7F3;3)o z)^K9`#^ZK^IH+frje~J)#Ydqb=Huy2ToaF7^TY~ILGtC{au4P?rhQDHaW^Y5{*p$h zQkJhnTnJoD)`~mXbaVqvqmHXzWM{ZzxB?nt@dVS^L(g`w^C5}VZLe+obA`3lo>PlG zl(fIU229EiK>N4Kc2K{t6Sa8QTTSTwRAzjNo%Y+f00%$sPis-JBC;6ivgtmbZo)yP}9c5(P2qoQJTHbtZ8ZiF?wSb%r8M#eA)0R}^%l;Lmg> zB5P-r0gtw!VRDm53qyRGV9g3(gkME2tHh}*;R?keL<8h?qH}pQl^3ak4#gy)$Y?Agn=~0();0PcJOGO z`MsuwHg%ftjPwD>f~bMSrQl_fB&|l>k`Oo)#~oh>O&d2kVd1&`J{m#bCZIi39Wq^r zJt8&j9vCvBsD`;k+B@=)db3#X1^;Tb^77BaK*P#ayHyyG>2FwCmkKZmST_HjYQkUo z{A>)o`0}lf2PBjcOxMmz>=P(S^dE9fHi1`Im z*LNtXl4A95q1SE<_)RLNA@&q#)X=O*d%#CkPSiirCcOWWMU^g*0jueuB&M+$^c%AP zEa^Q)%P|-591@lgJn8?qrY`AHC+7A>!_fIm)yD-8j)Fa;)qMnD3ADgkyCLW(JxksKA|}EE8R>YWiU8F z^^;6jQ*lpw5y@u5KU56B=OLGcpMQhp;#o^f8B4Vuy^l9z@>w!A0RkmWr=WQstf#(A zMQY+-yCp!89dDM z!ZC*EXIl9+SUVNPG7v6ou7ykdPvPn&au518(;=`^aUi?%FmOT-%%*0^#6h=802*KX zm$5Ow=C#qErRKNrs>&}0mo~ednvKpcryq`|07(pSNcihNf>q@B^5vBr$QBOt%hNff0(Lg3 zXG?%ggklEkG{^R?%#o8xQ%}#mE3U%wRV#;^$aybo@z!WFD*f>ovy(~_vYF+-vJ$t% z&7JiOlJ^Tv&3`pNO5a^}uzTS9{572f(<>+%P@epS`n9K^d_=QaZZyx590?rx5=8GZ z_OCK?3#a>ofq?7@7vM;fnZ|LYeziY_{PXiQzq0V5E+dohvr};he9?p2t?JUqzv*Kd z8ns9i>*0GC5}nEH4zFI)ay$#(S?+hfatUi2R zpEWkBh$yKPHy_BOdMq$L?P&cy5qO-XD5i|XgxRduVh7zzZW{;J1W`ZPDPOwdKz z6gnK$+-QF9FYI0R6pa!$$PxP1|M_{P7B?dovHFBq^`#jX;b22tKi_6OSFb)Ec_gLA z*@@pzy9Ub%3@%N$5EStL9|FU{0(e*5VCXYzc)N+@$wLAvZh@2} zfkRpTcj45cF8U?#P%x8`3%*ycRS+Dyq|LUm%GYJG zpjS6MNO)QQ!KTet+>k@1B%^M!fA!xfgMCi--qrK(Ynmq=n!Wj}ZJFa7j4zNP;1B$q z>y?&0hw27T{`rynxAXh~GVQO^%7(oWu$}_4NuOit7w8yrMXxY`AKa>(pGP-e^1s8X zI$h|W>ERJat-DY0*+~_1{UgYBEEd|NH`mv!x_uM3=`Q7Tiibg;CE))7a}*q*faSAK z>ju}i8JVVb??a(7nrOw{-YuQeSmUaT4g0JIj+(nYi z6k{4Yi3LwwVw0a+|DgnCTt)tt&d_ei#27JEsD!rRJhw_0>S_{Pf?cPNsKT71|u5$ zTjn76DHk=>hHz4bX=UqkJM|xUl|-BO0)Tq%AInEh4TSl08uXZ$8S;7UpGRK*{eLfM zCc(Uh-h?~~EIag*JNszPz<8|h-x0jPjAIU?bvkZNN81ruAhP1LW5;3SR9qu1>(_h0 zKFA;BmNSu_{xe&&>D@Kb?q`;PU(~o5odK^01uaC{PUIb8C-`u9s2aP5<5K+UJ*2DI zIdOffg#TLT2v^#y`*u5Wug01~%ejr=$s6Mq)$Ef9hc%7g_JYPU_3g(S&%*x`s+QREhvpq}#4=5a%7IUG%mbi96AEqF45B?q}L~`!Tzrq5H@5 zjw<$}_*FE8&Qk#9N02+8!`w&n<6t(Oh82x`%VAvC-qyN_M(jSFsisn)p;yKS>t&z5 zCNjYGmT0M^-r^2syeto0I};PKbLVIB-|dnNE`uGEIZQCk23`F8=YBQyCHxvpj{;Yt z{eCg8^`70&ao**-I`Z;~k938e{nlH&J};xFW6>ZIDbwI7t!ic57nXh*xSv4p+{#IR0Z}2V7;)Xn*E6sGsfd% zV~q{o8|V0$I&&O;=mBjV2R!XQGveP8-ZupJk4l(9uhiI=s#Db8q7$E-{4|yk8LA~- z&~N`WVwRvEZj>QZr@u!UTiv#{u>y$+4h<4gOq!8I23NbxbphLzZdj{A_I=005&1PvzTwr)W z{&=0W^1FXT*LL$B?-h`~<8Hqc*P#y&gZcp&Q 
zM2yZ=xW3j)_E}i{A5-nwBE5#_>A!DLV{K4^x|kSflZ7}jdzk38z;XPTG=|MqUjr9y zXVgOt2wPqwO$H&XI%|iI&90WC^FO{0gS+<^=66Q681kBz22f+jwpe#?BWIGT5g0gaXGhkbY7LWjcosYi{&@4h`Z(vc1tFI7I6NrePwF#jGws za_DD-Fn=~0;gkRKLM+tbUcV%XMH?FGC5NeWwvrWJ{W`?f{vtqx+2@Owja?NftC6xB zevWLMr9ON^GdtBWmLWX58uXbfvqL%c{a$9;TrWz#g>Uv9_UniL1dXos5<2eKvyR8b zj@tr(`jTdGeQGJx#<$8Rz0@%oDrKh6=A-F?{(%6EZ6Dy8$MAQR2Wo_=YAndss`z=?C5n_LsK<3)WoWyy@T(UIfKKHQ*ww?7 z3}Qf`L_x$|*JXNkYeXCr#dt~i@LW&|LVrn%!uH_=!Iq2|jZyXe`+9&^&msYt)Iyoo zJ$ILNT_DT&X@|GgpKk2wYGQsn0)M+Ev$?KSg|98GW1``waONCK`Vo1aeZxiH&0?wH zpYm_1DSKDP$KP$6ck2bKnwH>L@bkJ_Vc+Xi%>n#TKh=^u#ejB1J<3R%WzW?K*9#*p z>}-4>ZM)@-mC#^d@cefa{DNbLiaNmBxEzI=x|Wm@e%@V9f*IQ7KB8M@OAe-`?!8ch zb(YBg^-kB=e0Qh~@BWi>abY!%6DR*)P}Yx*PMF<$-bqEQykmcfEi2%1LJBnhT=6o> zkwf#DNt+BnuCXh#a9&0ueyH%YL7)WsdBpUQR9X+9vf-hGIXukg;3Bp--) zfyjF6+yT#doBZaBU;PJED#pIwy!jylunPwj&&!ir>^){-kI^Sq>8*}GM#gOF4r#-= zUp~#38yt*!6W8J-YdBbwE;QJ0+RWA3Qq@>7_8nDWDPp}qXzr!_zB;=q0sQz)S&uu(JY}U*bw5vl38>K? z)m%?m0p0^o_xVjElkMd=v`dTL3W9v)wmBqf4Ek&hCsWowkL7(=cZ&*kcL@Z1_Cp^J zCW>=2V`{zyYdQ}oZ+ZD@RV-VyH6PgI9myrK)CSi$hQL9=ruiS!ex$FBpELx)ZTSvK z+|t)iu9G8n&&M#OkmwP*vvr14b<8=XNJqyGALsg>5s8K1%0M_NyMFbyEg&v=87f6% z%JA*tRv5D7r|UL8Xwn7e$s`IB@d=hJ?`kRWhvDl~z3V)%VE*7p6p1VK0D%zl-^rr*6zj2dh0|U%vF>W*K)m@Y9JH6})P*y3Psfpj; z`|pTl{*_5Kx;BG0oW2$nXUF#2g1g_(HJZ&M>$LutwU>vBllL_xDj$pTE=?5CBHEH+ ziysX~@JyJVFup}m;<9V)&c*?A5&_WNG^#w5NU>^8npy2s6llzH8Ye^N1Ij>J_cJCFmn zA595f?%bbOeT;4g=#;jF3d5`l5X9dL>`>tBdQhj5xZ9M24NAjHDW&@N%OwPXa9cAxfaA>2F`Cql(-|Ws z)&{6AYhaspQkzKF9R5}o&vNcGl2w>pg#zDeW^(eOKS}aFhAB$Wz5Xyg#7V6{xrUjC z(GLf;g`sQRXi5M}#c{4PueI&`PT%LA!_|H#GG%|@Y2@o?8~tGaK=j|y-_hvID=`BJ z@oST_31^&D4RBhj$#kVDC2q##`}}n^mNa=6@H8UN5=M7_h_^ z;bbMBQ6TY1a2#Ov5up_75UP~e+dvfL*TXBmqUi0YU6`w$CakV`|G%Fp5<{5+h2`#l z5(@0Nr9QbdW}cIlo5rUU`xAX8dH-#K53%^U*!4%@;Q#}+45h69H-kCQFEijy&40Yp z@{GIVRdnyecVFGEF#Lx^TU2U}$GF%6g9@j#cCq}IJT8h5qApV=q6?5aqP6L7`}e6< z4thqR>P&bKh@ymm7>pMgpfJ{P?g~jCgsf&Typrvw5eEJBbeDD%M!Z1hE2bJDIGl#W zq*NTNB`ab=q4NA{yrEt35p{Kj?|lTiW2@dghqGf zTwFkcax>Id8Y4Y;WytP{=fqSmqv&oNlUX5UNG+ul8P0{g_4kr6jHgQIcp*H2Z%Y2O zOyVi*HFidTM-7N=yIcWVQPW1%SvYnWZeB8`@vUloVjkd8rAWk01`>C(Y2V(tZX{e)I5}q#tu~o$jQF0Yhhe^`Q>3wOgoI zdguJSB4I`y_Hp(n5wUART%jC+*Im)ymnAkwmk8vg_Gj^VhTV5(Rc%(l>a;mw!bq>G z>Saw*o4^Z*U`yJBC;nf$D5etGQtF6sn?i1U1it^tryE-08}E35oU!zs}#r zUMFi32ib(itk8mebwYdnDu_1WBx&^dEL)ZDEjMxiex&u;8jeBCLcOTpX(z{f+HpRq zkD?K$mgUTO%MJD49L$Pey)&*foP7|qwnB$}o(@VXJ5kNNotw&lb!>YQDx~*dCSbSLk;ZCK3DZ`L*}!XHk5*kc;me=u%2@rg@q9ewfn8D{2v@fMlv;Az3pbctf2Rlhq%%lC06vmJDd78>k zh^!b4eS>Q6w0HDI(>pwb!!Q8@+WFM1M?ht zqg38=?{qFDAz%r$r;e1J{}OIt-_{YmZ|Gnchgiy};MUklD=mM)nce+hKpfYB7q-O9 zyrPSqDv)Ai_k6#9U+><-&P+L=7m@ITknr=C@RBQmH~NkL z?op2+vf z-jP<;OP`Cm;oo|6+_#}9h!-~cXEh>0Rl=#e50%ojHWt709v0e^Jyux(7ud*)1Y%uX z!9}c{=5_c5IrgMiR?XdLA0R%PuTL0Zuh52(n8w-HLB^;M}ub1^&c7Ugbyei2o@54 zIh4oW9nVB9=DBFE?%NW2c8)*seZJuA>M+tW**O2w(*=O6{KhP-<}+n@uhp16QnN%T zq|G#bHU897vG#zx<9`<#{q&R7SHWm6S`3M%5o6)qF;9Yt-1`K<=jxa`#-phRYeVjF zoA#YP*+tgbu8wMCrrBl!+@d_z^v-``i?(>rQwTEv1dw;v3YPSi`eKz$Zn;s*JUhmd zOjlC%_7`T;{P!7dvbr~5_hP2m5w^l%Dhu0^kW(DqO3119y@qB9O3NGRzIy8(zQtOH z@0b6kM+!#M4_Tl-9<)_zpmHw~>HSwdXbWrWAwBYNk2<%J$2XVXB;iyd+5#SNBLN=2 z6w`!8p2nVUJMhZS9IK+!GBb~m%&+ZFnIiMxH`=bAbHp094%Qt!JILqzIr9Q^&9qaq zHvwQC=x@sL~Z6ZB+NPs%Jmu+Ml*!{jtP0UnUQovv?Z`g@lo(oWb<^lrW( zhdFoJ9S-<&(uCdpr@05bRgrqCpyP(H1#@^36#~11(!=?WrS?ewr9Oa)+S8VgNmf-J zmIs2*L5pfh%(N(|*e7i)hce`)xc;Fz)ZNzJjjWtnXe=t_F~}{);g;M?sjU{;@1y+g zTPGdH_iB3VOpa-|xCm&LyQY(k&aKyXQvF$-{U^2;VxU*K*?RZkx(j{;0Po9)#DiEw zlBkdUcQJmiUMIA;O;aIsCwo@uH7aKEvELyZKT$9KIXq4kx=sF~_l3n0INdPR)d9i) 
[GIT binary patch data omitted: base85-encoded PNG blobs for the two pruning diagrams
(docs/static/pruning_ecosystem_diagram.png and docs/static/pruning_flow.png) that this
commit adds and that the sparsity README changes below reference.]
zr6Re=@80T6X>v6g-}Iz=qNkpJ22lFH4tLC{TSj`x6Db4*sko>Q6y4m@yvq*`uN$S)?z%=d(opZfe*NLtUN zJ!@37sODwe{$Nq#RFq_$a|@7*DLz(Gw&xWa$WYN8xm5bg*cCkjkqDmjXJuuDu8kJ6 zutbLBCo0a3!zE6n>JII>{J1%YxgBAa>viSiujg6qpFiv1H_+qy`PbPROpH^dQc!Rx zFP9cMHTjW_(1w}duB)pnionyjKut1vLjfMWX$d`riO-2WKUIr2urtkLOO;qeu z5)-j&ig~pF`)_#raRMr(MMV7kh4c`(WnSuv3SmaY1|u5fVori%-2zmt;?9yT(^(% z)s~64tddcGwrBe31_+$81RS-y*L!f~#LS%(xW5H@oAfm{K9k_V7zN`{CHGp?rvVitA&wIU4a%I&9D zuQc%^C+|D_HY0vNB@TmCJtU}Wd^qhpbPK{)KpjE9BN$RNgWUW4&w$~aMNlr)Oe~$q zkry&EGs{TdTR(BuJT}Hz7Bd$v327;X^lWZjTUyRLKF8O$x=36vh@(7^Q5DyDj(dea z1`jD*XTJI`Yn_A7%5P_G9NwEWhswpz1sMzEXF|F>JdTR^QzDaSkooaa$7W7#16Uqj z-i`eB39BdP#p_?#&2}h6?RXyet@>GndAkru+y($m-V5JSY@yg7>112#dUs^RC!}U< zD4KVY13SBVn62Z=WI1|Yf3U#7M_&xE=0d(sBxxednSsIqfOhdoS~zpQmXKTSncX@w zFmLsIu0M8u{5iXJfL=rUQvdeWcCzGEw(C-*+>q$Pfb^CA$sJ(eS4w1~-<$g<=K_G2 zGXw359i#Vrk6ntOQ1M3wzg9Jq!-uCm5!(j;WQOpAtCcGM%wqo1xc~O!T@h7K`})?b z%RgBEj{xAWRsZwHqX&UXvrotEZoEI)<$r$u-ycze(=j}XDiXGTcw!&^+ui=-Iia&4 z+DlKSWpq{l?l*z8rbU&6Jm6qizQ4U?D$pl(A$Hlb=p?m&@a1p4F}npd=+n${$LQaD z*%R`-3jvt)>1WT2vwxdbR6)B;?C&mEX!Qb<2>&*duFwKZYKCNe{=44<)@n8Yq+=K6 zA@hG=(xr<4ldiZ*#Xb5zNCyUxj<(?#Ejrz1Dk_F@)kn%N$}avbUj6kj5CwxiU3O2& z{g+w%ck%sCv|Q%|Bn5*^W%)la>?s4lu=luU|MZ=IOE>b0$NkSE{|)E*|Aj~Xp9=q1Dm-;eGeKsx9C(;YrUL#{A8S4;d|>(J F{{anF_v8Qo literal 0 HcmV?d00001 diff --git a/torchao/sparsity/README.md b/torchao/sparsity/README.md index b18e996b58..49cbe51a13 100644 --- a/torchao/sparsity/README.md +++ b/torchao/sparsity/README.md @@ -44,7 +44,7 @@ The handoff point between these two pieces are sparse weights stored in a dense This also allows users with existing sparse weights in a dense format to take advantage of our fast sparse kernels. We anticipate many users to come up with their own custom frontend masking solution or to use another third party solution, as this is an active area of research. -![pruning_flow](https://private-user-images.githubusercontent.com/8041643/324607153-ba91eaca-14ce-4608-9db8-6cbb9ea1f9ec.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTQ1OTgzOTYsIm5iZiI6MTcxNDU5ODA5NiwicGF0aCI6Ii84MDQxNjQzLzMyNDYwNzE1My1iYTkxZWFjYS0xNGNlLTQ2MDgtOWRiOC02Y2JiOWVhMWY5ZWMucG5nP1gtQW16LUFsZ29yaXRobT1BV1M0LUhNQUMtU0hBMjU2JlgtQW16LUNyZWRlbnRpYWw9QUtJQVZDT0RZTFNBNTNQUUs0WkElMkYyMDI0MDUwMSUyRnVzLWVhc3QtMSUyRnMzJTJGYXdzNF9yZXF1ZXN0JlgtQW16LURhdGU9MjAyNDA1MDFUMjExNDU2WiZYLUFtei1FeHBpcmVzPTMwMCZYLUFtei1TaWduYXR1cmU9YWVjOWQ5ZjFjMWZmNjg4ZTgyZGFkYWU3ZDQ3MDBjMTZkNzczZWQxYzczN2ZiM2ZjZGY0NjUwMGUwY2UwZDA1YyZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QmYWN0b3JfaWQ9MCZrZXlfaWQ9MCZyZXBvX2lkPTAifQ.ni5F_wDhNkeupMJ84bFNxhaSO3xPH-9zecz_933Uu68) +![pruning_flow](/docs/static/pruning_ecosystem_diagram.png) Below, we provide an example of accelerating a model with 2:4 sparsity + bf16 using our PyTorch APIs. @@ -97,7 +97,7 @@ Note that this section focuses on **pruning**, instead of **sparse training**. 
T Roughly, the flow for achieving a more performant pruned model looks like this: -![flow](https://private-user-images.githubusercontent.com/8041643/324607146-53542488-65ce-4d99-a3ae-21e724f89467.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTQ1OTgzOTYsIm5iZiI6MTcxNDU5ODA5NiwicGF0aCI6Ii84MDQxNjQzLzMyNDYwNzE0Ni01MzU0MjQ4OC02NWNlLTRkOTktYTNhZS0yMWU3MjRmODk0NjcucG5nP1gtQW16LUFsZ29yaXRobT1BV1M0LUhNQUMtU0hBMjU2JlgtQW16LUNyZWRlbnRpYWw9QUtJQVZDT0RZTFNBNTNQUUs0WkElMkYyMDI0MDUwMSUyRnVzLWVhc3QtMSUyRnMzJTJGYXdzNF9yZXF1ZXN0JlgtQW16LURhdGU9MjAyNDA1MDFUMjExNDU2WiZYLUFtei1FeHBpcmVzPTMwMCZYLUFtei1TaWduYXR1cmU9ZWJlYWMzZDFmNzc2NDM1MGI2ODNlMjUxZjQxYTAwYzhhNzBkNGU2ZGIwYTg4NzA5Yjk3N2JkNzI4MmUyNzg3NiZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QmYWN0b3JfaWQ9MCZrZXlfaWQ9MCZyZXBvX2lkPTAifQ.Hxk5XMuJXhNsORVNNgcKNRCk7W1nT4CndLTAC3Oz0qE) +![flow](/docs/static/pruning_flow.png) The general idea behind pruning is that we can mask out some of the weights of a trained neural network and recover any accuracy loss. The resultant pruned model can be run on optimized kernels that take advantage of this sparsity for accelerated inference. From f8f74c7c1304b838b9e6c3af74ba07446c1e83b3 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Fri, 24 May 2024 14:47:14 -0700 Subject: [PATCH 53/61] Move `AffineQuantizedTensor` to torchao/dtypes (#272) Summary: att Test Plan: regression tests in test/quantization/test_quant_api.py Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_api.py | 27 +- torchao/dtypes/__init__.py | 3 + torchao/dtypes/aqt.py | 444 +++++++++++++++++++++++++++ torchao/quantization/subclass.py | 452 +--------------------------- 4 files changed, 465 insertions(+), 461 deletions(-) create mode 100644 torchao/dtypes/aqt.py diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index f0830cf8a8..24882b8418 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -18,17 +18,18 @@ get_symmetric_quantization_config, ) -from torchao.quantization.subclass import ( - to_aqt, - to_laqt, +from torchao.dtypes import ( + to_aq, AffineQuantizedTensor, - LinearActQuantizedTensor, ) from torchao.quantization.quant_primitives import ( MappingType, ZeroPointDomain, ) - +from torchao.quantization.subclass import ( + to_laq, + LinearActQuantizedTensor, +) from torchao.quantization.quant_api import ( _replace_with_custom_fn_if_matches_filter, apply_dynamic_quant, @@ -429,17 +430,17 @@ def get_per_token_block_size(x): # input settings input_mapping_type = MappingType.ASYMMETRIC input_target_dtype = torch.int8 - input_quant_func = lambda x: to_aqt(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) + input_quant_func = lambda x: to_aq(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) m = ToyLinearModel().eval() m_copy = copy.deepcopy(m) example_inputs = m.example_inputs() def apply_weight_quant(weight): - return to_aqt(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps) + return to_aq(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps) def apply_act_quant(weight): - return to_laqt(weight, input_quant_func) + return to_laq(weight, input_quant_func) # note: order is important m = quantize(m, apply_weight_quant) @@ -484,7 +485,7 @@ def test_quantized_tensor_subclass_int4(self): example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs())) def 
apply_weight_quant(weight): - return to_aqt(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=zero_point_domain) + return to_aq(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=zero_point_domain) m = quantize(m, apply_weight_quant) assert isinstance(m.linear1.weight, AffineQuantizedTensor) @@ -515,7 +516,7 @@ def test_quantized_tensor_subclass_int8(self): def apply_weight_quant(weight): block_size = (1, weight.shape[1]) - return to_aqt(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) + return to_aq(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) m = quantize(m, apply_weight_quant) @@ -555,7 +556,7 @@ def get_per_token_block_size(x): input_eps = 1e-5 input_quant_min = -127 input_quant_max = 127 - input_quant_func = lambda x: to_aqt(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None) + input_quant_func = lambda x: to_aq(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None) # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") @@ -565,10 +566,10 @@ def get_per_token_block_size(x): def apply_weight_quant(weight): block_size = get_weight_block_size(weight) - return to_aqt(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) + return to_aq(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) def apply_act_quant(weight): - return to_laqt(weight, input_quant_func) + return to_laq(weight, input_quant_func) m = quantize(m, apply_weight_quant) m = quantize(m, apply_act_quant) diff --git a/torchao/dtypes/__init__.py b/torchao/dtypes/__init__.py index b14aff9904..dccd22f3d4 100644 --- a/torchao/dtypes/__init__.py +++ b/torchao/dtypes/__init__.py @@ -1,8 +1,11 @@ from .nf4tensor import NF4Tensor, to_nf4 from .uint4 import UInt4Tensor +from .aqt import AffineQuantizedTensor, to_aq __all__ = [ "NF4Tensor", "to_nf4", "UInt4Tensor" + "AffineQuantizedTensor", + "to_aq", ] diff --git a/torchao/dtypes/aqt.py b/torchao/dtypes/aqt.py new file mode 100644 index 0000000000..7619545f52 --- /dev/null +++ b/torchao/dtypes/aqt.py @@ -0,0 +1,444 @@ +import torch +from typing import Dict, Callable, Any, Tuple, Optional +from collections import defaultdict +import functools +from torchao.quantization.quant_primitives import ( + choose_qparams_affine, + quantize_affine, + dequantize_affine, + ZeroPointDomain, + MappingType, + pack_tinygemm_scales_and_zeros, +) +from torch.utils._python_dispatch import return_and_correct_aliasing +from torchao.kernel.intmm import int_scaled_matmul + +aten = torch.ops.aten + +def _aqt_is_int8(aqt): + """Check if an AffineQuantizedTensor is int8 quantized Tensor""" + return ( + aqt.int_data.dtype == torch.int8 and + aqt.quant_min is None or aqt.quant_min == -128 and + aqt.quant_max is None or aqt.quant_max == 127 + ) + +def _aqt_is_int8_reduced_range(aqt): + return ( + aqt.int_data.dtype == torch.int8 and + aqt.quant_min == -127 and + aqt.quant_max is None or 
aqt.quant_max == 127
+    )
+
+def _aqt_is_uint4(aqt):
+    """Check if an AffineQuantizedTensor is uint4 quantized Tensor"""
+    # TODO: use torch.uint4
+    return (
+        aqt.int_data.dtype == torch.int32 and
+        aqt.quant_min is None or aqt.quant_min == 0 and
+        aqt.quant_max is None or aqt.quant_max == 15
+    )
+
+# TODO: merge with nf4 implements decorator
+# aten op to their __torch_dispatch__ implementations for the tensor subclass
+_ATEN_OPS_TABLE: Dict[Callable, Dict[Any, Any]] = defaultdict(dict)
+
+def implements_aten_ops(cls, aten_ops):
+    """Use this decorator to implement a function for an aten op in __torch_dispatch__"""
+
+    def decorator(func):
+        for op in aten_ops:
+            _ATEN_OPS_TABLE[cls][op] = func
+        return func
+
+    return decorator
+
+_TORCH_FUNCTIONS_TABLE: Dict[Callable, Dict[Any, Any]] = defaultdict(dict)
+
+def implements_torch_function(cls, torch_function):
+    def decorator(func):
+        functools.update_wrapper(func, torch_function)
+        _TORCH_FUNCTIONS_TABLE[cls][torch_function] = func
+        return func
+
+    return decorator
+
+def implements_aqt_aten_ops(aten_ops):
+    return implements_aten_ops(AffineQuantizedTensor, aten_ops)
+
+def implements_aqt_torch_function(torch_function):
+    return implements_torch_function(AffineQuantizedTensor, torch_function)
+
+
+class AffineQuantizedTensor(torch.Tensor):
+    """
+    Base affine quantized tensor subclass. The from_float method is used
+    to create an instance of any AffineQuantizedTensor.
+
+    The shape and dtype of the tensor subclass represent how the tensor subclass looks externally,
+    regardless of the internal representation's type or orientation.
+
+    Affine quantization means we quantize the floating point tensor with an affine transformation:
+      quantized_tensor = float_tensor / scale + zero_point
+
+    fields:
+      int_data (torch.Tensor): the quantized integer data Tensor
+      scale (torch.Tensor): the scale Tensor used to map between floating point tensor to quantized tensor
+      zero_point (torch.Tensor): the zero_point Tensor used to map between floating point tensor to quantized tensor
+      block_size (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam
+        e.g. when size is the same as the input tensor dimension, we are using per tensor quantization
+      shape (torch.Size): the shape for the Tensor
+      quant_min (Optional[int]): minimum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data`
+      quant_max (Optional[int]): maximum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data`
+      zero_point_domain (ZeroPointDomain): the domain that zero_point is in, should be either integer or float
+        if zero_point is in integer domain, zero point is added to the quantized integer value during
+        quantization
+        if zero_point is in floating point domain, zero point is subtracted from the floating point (unquantized)
+        value during quantization
+        default is ZeroPointDomain.INT
+      input_quant_func (Optional[Callable]): function for quantizing the input float Tensor to a quantized tensor subclass object, that takes float Tensor as input and outputs an AffineQuantizedTensor object
+      dtype: dtype for external representation of the tensor, e.g. 
torch.float32 + """ + + @staticmethod + def __new__( + cls, + int_data: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + block_size: Tuple[int, ...], + shape: torch.Size, + quant_min: Optional[int] = None, + quant_max: Optional[int] = None, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, + dtype=None, + strides=None, + ): + kwargs = {} + kwargs["device"] = int_data.device + kwargs["layout"] = ( + kwargs.get("layout") if kwargs.get("layout", False) else int_data.layout + ) + if dtype is None: + dtype = scale.dtype + kwargs["dtype"] = dtype + if strides is not None: + kwargs["strides"] = strides + kwargs["requires_grad"] = False + return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] + + def __init__( + self, + int_data: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + block_size: Tuple[int, ...], + shape: torch.Size, + quant_min: Optional[int] = None, + quant_max: Optional[int] = None, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, + dtype=None, + strides=None, + ): + self.int_data = int_data + self.scale = scale + self.zero_point = zero_point + self.block_size = block_size + self.quant_min = quant_min + self.quant_max = quant_max + self.zero_point_domain = zero_point_domain + + def __repr__(self): + return ( + f"{self.__class__.__name__}(data={self.dequantize()}, shape={self.shape}, " + f"device={self.device}, dtype={self.dtype}, requires_grad={self.requires_grad})" + ) + + def dequantize(self, output_dtype=None): + if output_dtype is None: + output_dtype = self.dtype + return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, self.zero_point_domain, output_dtype=output_dtype) + + def __tensor_flatten__(self): + return ["int_data", "scale", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.dtype] + + @classmethod + def __tensor_unflatten__( + cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride + ): + int_data, scale, zero_point = tensor_data_dict["int_data"], tensor_data_dict["scale"], tensor_data_dict["zero_point"] + block_size, shape, quant_min, quant_max, zero_point_domain, dtype = tensor_attributes + return cls( + int_data, + scale, + zero_point, + block_size, + shape if outer_size is None else outer_size, + quant_min, + quant_max, + zero_point_domain, + dtype=dtype, + strides=outer_stride, + ) + + @classmethod + def from_float( + cls, + input_float: torch.Tensor, + mapping_type: MappingType, + block_size: Tuple[int, ...], + target_dtype: torch.dtype, + quant_min: Optional[int] = None, + quant_max: Optional[int] = None, + eps: Optional[float] = None, + scale_dtype: Optional[torch.dtype] = None, + zero_point_dtype: Optional[torch.dtype] = None, + preserve_zero: bool = True, + zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, + ): + scale, zero_point = choose_qparams_affine(input_float, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, scale_dtype, zero_point_dtype, preserve_zero, zero_point_domain) + int_data = quantize_affine(input_float, block_size, scale, zero_point, target_dtype, quant_min, quant_max, zero_point_domain) + return cls( + int_data, + scale, + zero_point, + block_size, + input_float.shape, + quant_min, + quant_max, + zero_point_domain, + dtype=input_float.dtype + ) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + kwargs = {} if kwargs is None else kwargs + + if 
func in _TORCH_FUNCTIONS_TABLE[cls]:
+            return _TORCH_FUNCTIONS_TABLE[cls][func](*args, **kwargs)
+
+        with torch._C.DisableTorchFunctionSubclass():
+            return func(*args, **kwargs)
+
+
+    def _get_to_kwargs(self, *args, **kwargs):
+        device, dtype, _, memory_format = torch._C._nn._parse_to(*args, **kwargs)
+        device = self.device if device is None else device
+        dtype = self.dtype if dtype is None else dtype
+        memory_format = (
+            memory_format if memory_format is not None else torch.preserve_format
+        )
+        kwargs = {
+            "device": device,
+            "dtype": dtype,
+            "memory_format": memory_format,
+        }
+        return kwargs
+
+    def to(self, *args, **kwargs):
+        kwargs = self._get_to_kwargs(*args, **kwargs)
+        return self.__class__(
+            self.int_data.to(kwargs["device"]),
+            self.scale.to(kwargs["device"]),
+            self.zero_point.to(kwargs["device"]),
+            self.block_size,
+            self.shape,
+            self.quant_min,
+            self.quant_max,
+            self.zero_point_domain,
+            **kwargs,
+        )
+
+    def _apply_fn_to_data(self, fn):
+        return self.__class__(
+            fn(self.int_data),
+            fn(self.scale),
+            fn(self.zero_point),
+            self.block_size,
+            self.shape,
+            self.quant_min,
+            self.quant_max,
+            self.zero_point_domain,
+            dtype=self.dtype,
+            strides=self.stride(),
+        )
+
+    @classmethod
+    def __torch_dispatch__(cls, func, types, args, kwargs):
+        # Note: we only added cpu path here for 8da4w, this is for executorch, in the future
+        # 1. we'll add cpu/cuda version (int4mm etc.)
+        # 2. we'll need to hide the 8da4w executorch version under things like layouts (we also have multiple impl for cpu kernel as Michael mentioned), so it will be something like
+        #   cpu device + et layout --> gives current 8da4w executorch representation
+        #   cpu device + avx layout --> gives optimized kernel for 8da4w in avx cpu etc.
+        #   cuda device + some layout --> gives cuda kernel
+
+        # two scenarios where we currently fall back to vanilla mm:
+        # 1 - when tensor is on CUDA: we'll add this later, we'll also enable dispatching to optimized
+        #     kernels in CPU as well, see the note above
+        # 2 - we're given non-floats - quantizing long to int8 is crazy
+
+        if func in _ATEN_OPS_TABLE[cls]:
+            return _ATEN_OPS_TABLE[cls][func](func, *args, **kwargs)
+
+        raise NotImplementedError(
+            f"AffineQuantizedTensor dispatch: attempting to run {func}, this is not supported"
+        )
+
+@implements_aqt_torch_function(torch.nn.functional.linear)
+def functional_linear(*args, **kwargs):
+    input_tensor, weight_qtensor, bias = (
+        args[0],
+        args[1],
+        args[2] if len(args) > 2 else None,
+    )
+    is_cuda = weight_qtensor.is_cuda
+    is_cpu = weight_qtensor.device == torch.device("cpu")
+    if isinstance(weight_qtensor, AffineQuantizedTensor):
+        weight_is_int8 = _aqt_is_int8(weight_qtensor)
+        weight_is_uint4 = _aqt_is_uint4(weight_qtensor)
+
+        if isinstance(input_tensor, AffineQuantizedTensor):
+            # if input tensor is quantized, either dispatch to the int8 mm kernel
+            # or just dequantize the input tensor
+            input_is_int8 = _aqt_is_int8_reduced_range(input_tensor)
+            input_tensor_dtype_is_expected = input_tensor.dtype in [
+                torch.float,
+                torch.bfloat16
+            ]
+            if (
+                is_cuda and
+                input_is_int8 and
+                input_tensor_dtype_is_expected
+            ):
+                #
+                # 1. do the matrix form of dot(X_i, W_j)
+                #
+                # 2. rescale the output
+                #
+                # in cases with large matrices, y_dot_int32 can grow sufficiently
+                # large that y_dot_int32 * a float16 scale is greater than the maximum
+                # value of a float 16, (which results in a value of inf even if multiplying
+                # by the other scale would bring it within the expected range)
+
+                x_vals_int8 = input_tensor.int_data
+                x_scales = input_tensor.scale
+                w_vals_int8_t = weight_qtensor.int_data.contiguous().t()
+                w_scales = weight_qtensor.scale
+                tmp = x_vals_int8.reshape(-1, x_vals_int8.shape[-1])
+                y_dot_scaled = int_scaled_matmul(tmp, w_vals_int8_t, x_scales.reshape(-1, 1))
+
+                y = (y_dot_scaled * w_scales).reshape(
+                    *x_vals_int8.shape[:-1], y_dot_scaled.shape[-1]
+                )
+
+                # can downcast only at the very end
+                output_dtype = input_tensor.dtype
+                y = y.to(output_dtype)
+                if bias is not None:
+                    y += bias
+                return y
+            else:
+                input_tensor = input_tensor.dequantize()
+
+        # weight only quantization
+        # TODO: enable cpu and mps path as well
+        # TODO: make sure weight dimension matches the expectation of the int4mm kernel
+        # TODO: move this to TinygemmAffineQuantizedTensor
+        if (
+            is_cuda and
+            weight_is_uint4 and
+            weight_qtensor.dtype == torch.bfloat16 and
+            len(weight_qtensor.shape) == 2 and
+            weight_qtensor.block_size[0] == 1 and
+            weight_qtensor.zero_point_domain == ZeroPointDomain.FLOAT
+        ):
+            # groupwise int4 quantization
+            # TODO: currently doing packing on the fly, we'll need to figure out
+            # the API to do packing before hand
+            # TODO: expose the arg
+            innerKTiles = 8
+            packed_weight = torch.ops.aten._convert_weight_to_int4pack(weight_qtensor.int_data.to(torch.int32), innerKTiles)
+            scales_and_zeros = pack_tinygemm_scales_and_zeros(weight_qtensor.scale, weight_qtensor.zero_point)
+            groupsize = weight_qtensor.block_size[-1]
+            return torch.ops.aten._weight_int4pack_mm(input_tensor.contiguous(), packed_weight, groupsize, scales_and_zeros)
+        elif (
+            is_cpu and
+            weight_is_int8 and
+            len(weight_qtensor.shape) == 2 and
+            len(weight_qtensor.block_size) == 2 and
+            weight_qtensor.block_size[0] == 1 and
+            weight_qtensor.block_size[1] == weight_qtensor.shape[1]
+        ):
+            # TODO: enable mps path as well
+            # per channel int8 weight only quantized mm
+            return torch.ops.aten._weight_int8pack_mm(input_tensor.contiguous(), weight_qtensor.int_data, weight_qtensor.scale)
+        else:
+            weight_tensor = weight_qtensor.dequantize()
+            return torch.nn.functional.linear(input_tensor, weight_tensor, bias)
+    else:
+        if isinstance(input_tensor, AffineQuantizedTensor):
+            input_tensor = input_tensor.dequantize()
+        return torch.nn.functional.linear(input_tensor, weight_qtensor, bias)
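To make the overflow note in `functional_linear` concrete, here is the same quantize-matmul-rescale sequence in plain eager PyTorch. This is an illustrative sketch separate from the patch; the real path calls the fused `int_scaled_matmul` kernel instead:

```python
import torch

# Per-token int8 activations times per-channel int8 weights, accumulated in int32.
x_int8 = torch.randint(-127, 128, (8, 64), dtype=torch.int8)    # (tokens, in_features)
w_int8 = torch.randint(-128, 128, (32, 64), dtype=torch.int8)   # (out_features, in_features)
x_scale = torch.rand(8, 1)                                      # one fp32 scale per token
w_scale = torch.rand(32)                                        # one fp32 scale per channel

y_int32 = x_int8.to(torch.int32) @ w_int8.to(torch.int32).t()
# Fold in the activation scale first: multiplying the raw int32 accumulator by a
# float16 weight scale directly is what can overflow to inf for large matrices.
y = (y_int32.to(torch.float32) * x_scale) * w_scale             # shape (8, 32)
```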
+
+
+@implements_aqt_aten_ops([aten.mm.default, aten.addmm.default])
+def aten_mm(func, *args, **kwargs):
+    if not args[0].is_floating_point():
+        raise NotImplementedError(f"{func} is not implemented for non floating point input")
+
+    if func == aten.addmm.default:
+        assert args[1].shape[-1] == args[2].shape[0], (
+            f"need mat1 shape: {args[1].shape} final "
+            f"dim to match mat2 shape: {args[2].shape} first dim "
+        )
+        input_tensor, weight_qtensor, bias = (
+            args[1],
+            args[2],
+            args[0],
+        )
+    else:
+        assert args[0].shape[-1] == args[1].shape[0], (
+            f"need mat1 shape: {args[0].shape} final dim "
+            f"to match mat2 shape: {args[1].shape} first dim"
+        )
+        input_tensor, weight_qtensor, bias = (
+            args[0],
+            args[1],
+            None if len(args) == 2 else args[2],
+        )
+    weight_tensor = weight_qtensor.dequantize()
+    return func(input_tensor, weight_tensor, bias)
+
+@implements_aqt_aten_ops([aten.detach.default])
+def 
detach(func, *args, **kwargs): + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) + ) + + +@implements_aqt_aten_ops([aten.clone.default]) +def clone(func, *args, **kwargs): + return return_and_correct_aliasing( + func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) + ) + + +@implements_aqt_aten_ops([aten._to_copy.default]) +def _to_copy(func, *args, **kwargs): + return return_and_correct_aliasing( + func, + args, + kwargs, + args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), + ) + +@implements_aqt_aten_ops([aten.t.default]) +def t(func, *args, **kwargs): + # TODO: need to implement this + # args[0].transposed = not args[0].transposed + # new = args[0]._change_shape(args[0].shape[::-1]) + # return return_and_correct_aliasing(func, args, kwargs, new) + raise Exception("transpose not implemented yet") + +to_aq = AffineQuantizedTensor.from_float diff --git a/torchao/quantization/subclass.py b/torchao/quantization/subclass.py index 6e844530d4..ee13512e9f 100644 --- a/torchao/quantization/subclass.py +++ b/torchao/quantization/subclass.py @@ -14,58 +14,25 @@ dynamically_quantize_per_channel, groupwise_affine_quantize_tensor, quant_int8_dynamic_per_token_linear, - pack_tinygemm_scales_and_zeros, unpack_tinygemm_scales_and_zeros, groupwise_affine_quantize_tensor_from_qparams, - choose_qparams_affine, - quantize_affine, - dequantize_affine, - ZeroPointDomain, MappingType, ) -from torchao.kernel.intmm import int_scaled_matmul from .utils import find_multiple from typing import Tuple, Optional, Callable, Dict, Any -from collections import defaultdict -import functools __all__ = [ "Int8DynamicallyQuantizedLinearWeight", "Int8WeightOnlyQuantizedLinearWeight", "Int4WeightOnlyQuantizedLinearWeight", - "AffineQuantizedTensor", "LinearActQuantizedTensor", + "to_laq", ] aten = torch.ops.aten -def _aqt_is_int8(aqt): - """Check if an AffineQuantizedTensor is int8 quantized Tensor""" - return ( - aqt.int_data.dtype == torch.int8 and - aqt.quant_min is None or aqt.quant_min == -128 and - aqt.quant_max is None or aqt.quant_max == 127 - ) - -def _aqt_is_int8_reduced_range(aqt): - return ( - aqt.int_data.dtype == torch.int8 and - aqt.quant_min == -127 and - aqt.quant_max is None or aqt.quant_max == 127 - ) - -def _aqt_is_uint4(aqt): - """Check if an AffineQuantizedTensor is uint4 quantized Tensor""" - # TODO: use torch.uint4 - return ( - aqt.int_data.dtype == torch.int32 and - aqt.quant_min is None or aqt.quant_min == 0 and - aqt.quant_max is None or aqt.quant_max == 15 - ) - - class QuantizedLinearWeightBase(torch.Tensor): """ Base quantized tensor subclass for quantized linear weights. 
When the from_float method is used, @@ -630,409 +597,6 @@ def to_qtensor_components(cls, input_float, groupsize=128, inner_k_tiles=8): return int_data, scales_and_zeros, False, groupsize, inner_k_tiles -# TODO: merge with nf4 implements decorator -# aten op to their __torch_dispatch__ implemnetations for the tensor subclass -_ATEN_OPS_TABLE: Dict[Callable, Dict[Any, Any]] = defaultdict(dict) - -def implements_aten_ops(cls, aten_ops): - """Use this decorator to implement a function for an aten op in __torch_dispatch__""" - - def decorator(func): - for op in aten_ops: - _ATEN_OPS_TABLE[cls][op] = func - return func - - return decorator - -_TORCH_FUNCTIONS_TABLE: Dict[Callable, Dict[Any, Any]] = defaultdict(dict) - -def implements_torch_function(cls, torch_function): - def decorator(func): - functools.update_wrapper(func, torch_function) - _TORCH_FUNCTIONS_TABLE[cls][torch_function] = func - return func - - return decorator - -def implements_aqt_aten_ops(aten_ops): - return implements_aten_ops(AffineQuantizedTensor, aten_ops) - -def implements_aqt_torch_function(torch_function): - return implements_torch_function(AffineQuantizedTensor, torch_function) - - -class AffineQuantizedTensor(torch.Tensor): - """ - Base affine quantized tensor subclass. When the from_float method is used, - to create an instance of any AffineQuantizedTensor - - The shape and dtype of the tensor subclass represent how the tensor subclass looks externally, - regardless of the internal representation's type or orientation. - - Affine quantization means we quantize the floating point tensor with an affine transformation: - quantized_tensor = float_tensor / scale + zero_point - - fields: - int_data (torch.Tensor): the quantized integer data Tensor - scale (torch.Tensor): the scale Tensor used to map between floating point tensor to quantized tensor - zero_point (torch.Tensor): the zero_point Tensor used to map between floating point tensor to quantized tensor - block_size (Tuple[int, ...]): granularity of quantization, this means the size of the tensor elements that's sharing the same qparam - e.g. when size is the same as the input tensor dimension, we are using per tensor quantization - shape (torch.Size): the shape for the Tensor - quant_min (Optional[int]): minimum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data` - quant_max (Optional[int]): maximum quantized value for the Tensor, if not specified, it will be derived from dtype of `int_data` - zero_point_domain (ZeroPointDomain): the domain that zero_point is in, should be eitehr integer or float - if zero_point is in integer domain, zero point is added to the quantized integer value during - quantization - if zero_point is in floating point domain, zero point is subtracted from the floating point (unquantized) - value during quantization - default is ZeroPointDomain.INT - input_quant_func (Optional[Callable]): function for quantizing the input float Tensor to a quantized tensor subclass object, that takes float Tensor as input and outputs an AffineQuantizedTensor object - dtype: dtype for external representation of the tensor, e.g. 
torch.float32 - """ - - @staticmethod - def __new__( - cls, - int_data: torch.Tensor, - scale: torch.Tensor, - zero_point: torch.Tensor, - block_size: Tuple[int, ...], - shape: torch.Size, - quant_min: Optional[int] = None, - quant_max: Optional[int] = None, - zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, - dtype=None, - strides=None, - ): - kwargs = {} - kwargs["device"] = int_data.device - kwargs["layout"] = ( - kwargs.get("layout") if kwargs.get("layout", False) else int_data.layout - ) - if dtype is None: - dtype = scale.dtype - kwargs["dtype"] = dtype - if strides is not None: - kwargs["strides"] = strides - kwargs["requires_grad"] = False - return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] - - def __init__( - self, - int_data: torch.Tensor, - scale: torch.Tensor, - zero_point: torch.Tensor, - block_size: Tuple[int, ...], - shape: torch.Size, - quant_min: Optional[int] = None, - quant_max: Optional[int] = None, - zero_point_domain: ZeroPointDomain = ZeroPointDomain.INT, - dtype=None, - strides=None, - ): - self.int_data = int_data - self.scale = scale - self.zero_point = zero_point - self.block_size = block_size - self.quant_min = quant_min - self.quant_max = quant_max - self.zero_point_domain = zero_point_domain - - def __repr__(self): - return ( - f"{self.__class__.__name__}(data={self.dequantize()}, shape={self.shape}, " - f"device={self.device}, dtype={self.dtype}, requires_grad={self.requires_grad})" - ) - - def dequantize(self, output_dtype=None): - if output_dtype is None: - output_dtype = self.dtype - return dequantize_affine(self.int_data, self.block_size, self.scale, self.zero_point, self.int_data.dtype, self.quant_min, self.quant_max, self.zero_point_domain, output_dtype=output_dtype) - - def __tensor_flatten__(self): - return ["int_data", "scale", "zero_point"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.dtype] - - @classmethod - def __tensor_unflatten__( - cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride - ): - int_data, scale, zero_point = tensor_data_dict["int_data"], tensor_data_dict["scale"], tensor_data_dict["zero_point"] - block_size, shape, quant_min, quant_max, zero_point_domain, dtype = tensor_attributes - return cls( - int_data, - scale, - zero_point, - block_size, - shape if outer_size is None else outer_size, - quant_min, - quant_max, - zero_point_domain, - dtype=dtype, - strides=outer_stride, - ) - - @classmethod - def from_float( - cls, - input_float, - mapping_type, - block_size, - target_dtype, - quant_min = None, - quant_max = None, - eps = None, - scale_dtype = None, - zero_point_dtype = None, - preserve_zero = True, - zero_point_domain = ZeroPointDomain.INT, - ): - scale, zero_point = choose_qparams_affine(input_float, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, scale_dtype, zero_point_dtype, preserve_zero, zero_point_domain) - int_data = quantize_affine(input_float, block_size, scale, zero_point, target_dtype, quant_min, quant_max, zero_point_domain) - return cls( - int_data, - scale, - zero_point, - block_size, - input_float.shape, - quant_min, - quant_max, - zero_point_domain, - dtype=input_float.dtype - ) - - @classmethod - def __torch_function__(cls, func, types, args=(), kwargs=None): - kwargs = {} if kwargs is None else kwargs - - if func in _TORCH_FUNCTIONS_TABLE[cls]: - return _TORCH_FUNCTIONS_TABLE[cls][func](*args, **kwargs) - - with torch._C.DisableTorchFunctionSubclass(): - return func(*args, 
**kwargs) - - - def _get_to_kwargs(self, *args, **kwargs): - device, dtype, _, memory_format = torch._C._nn._parse_to(*args, **kwargs) - device = self.device if device is None else device - dtype = self.dtype if dtype is None else dtype - memory_format = ( - memory_format if memory_format is not None else torch.preserve_format - ) - kwargs = { - "device": device, - "dtype": dtype, - "memory_format": memory_format, - } - return kwargs - - def to(self, *args, **kwargs): - kwargs = self._get_to_kwargs(*args, **kwargs) - return self.__class__( - self.int_data.to(kwargs["device"]), - self.scale.to(kwargs["device"]), - self.zero_point.to(kwargs["device"]), - self.block_size, - self.shape, - self.quant_min, - self.quant_max, - self.zero_point_domain, - **kwargs, - ) - - def _apply_fn_to_data(self, fn): - return self.__class__( - fn(self.int_data), - fn(self.scale), - fn(self.zero_point), - self.block_size, - self.shape, - self.quant_min, - self.quant_max, - self.zero_point_domain, - dtype=self.dtype, - strides=self.stride(), - ) - - @classmethod - def __torch_dispatch__(cls, func, types, args, kwargs): - # Note: we only added cpu path here for 8da4w, this is for executorch, in the future - # 1. we'll add cpu/cuda version (int4mm etc.) - # 2. we'll need to hide the 8da4w executorch version under things like layouts (we also have multiple impl for cpu kernel as Michael mentioned), so it will be something like - # cpu device + et laytout --> gives current 8da4w executorch representation - # cpu device + avx layout --> gives optimized kernel for 8da4w in avx cpu etc. - # cuda device + some layout --> gives cuda kernel - - # two scenarios where we currently fall back to vanilla mm: - # 1 - when tensor is on CUDA: we'll add this later, we'll also enable dispatching to optimized - # kernels in CPU as well, see the note above - # 2 - we're given non-floats - quantizing long to int8 is crazy - - if func in _ATEN_OPS_TABLE[cls]: - return _ATEN_OPS_TABLE[cls][func](func, *args, **kwargs) - - raise NotImplementedError( - f"AffineQuantizedTensor dispatch: attempting to run {func}, this is not supported" - ) - -@implements_aqt_torch_function(torch.nn.functional.linear) -def functional_linear(*args, **kwargs): - input_tensor, weight_qtensor, bias = ( - args[0], - args[1], - args[2] if len(args) > 2 else None, - ) - is_cuda = weight_qtensor.is_cuda - is_cpu = weight_qtensor.device == torch.device("cpu") - if isinstance(weight_qtensor, AffineQuantizedTensor): - weight_is_int8 = _aqt_is_int8(weight_qtensor) - weight_is_uint4 = _aqt_is_uint4(weight_qtensor) - - if isinstance(input_tensor, AffineQuantizedTensor): - # if input tensor is quantized, either dispatch to the int8 mm kernel - # or just dequantize the input tensor - input_is_int8 = _aqt_is_int8_reduced_range(input_tensor) - input_tensor_dtype_is_expected = input_tensor.dtype in [ - torch.float, - torch.bfloat16 - ] - if ( - is_cuda and - input_is_int8 and - input_tensor_dtype_is_expected - ): - # - # 1. do the matrix form of dot(X_i, W_j) - # - # - # 2. 
rescale the output - # - # in cases with large matrices, y_dot_int32 can grow sufficiently - # large that y_dot_int32 * a float16 scale is greater than the maximum - # value of a float 16, (which results in a value of inf even if multiplying - # by the other scale would bring it within the expected range) - - x_vals_int8 = input_tensor.int_data - x_scales = input_tensor.scale - w_vals_int8_t = weight_qtensor.int_data.contiguous().t() - w_scales = weight_qtensor.scale - tmp = x_vals_int8.reshape(-1, x_vals_int8.shape[-1]) - y_dot_scaled = int_scaled_matmul(tmp, w_vals_int8_t, x_scales.reshape(-1, 1)) - - y = (y_dot_scaled * w_scales).reshape( - *x_vals_int8.shape[:-1], y_dot_scaled.shape[-1] - ) - - # can downcast only at the very end - output_dtype = input_tensor.dtype - y = y.to(output_dtype) - if bias is not None: - y += bias - return y - else: - input_tensor = input_tensor.dequantize() - - # weight only quantization - # TODO: enable cpu and mps path as well - # TODO: make sure weight dimension matches the expectation of the int4mm kernel - # TODO: move this to TinygemmAffineQuantizedTensor - if ( - is_cuda and - weight_is_uint4 and - weight_qtensor.dtype == torch.bfloat16 and - len(weight_qtensor.shape) == 2 and - weight_qtensor.block_size[0] == 1 and - weight_qtensor.zero_point_domain == ZeroPointDomain.FLOAT - ): - # groupwise int4 quantization - # TODO: currently doing packing on the fly, we'll need to figure out - # the API to do packing before hand - # TODO: expose the arg - innerKTiles = 8 - packed_weight = torch.ops.aten._convert_weight_to_int4pack(weight_qtensor.int_data.to(torch.int32), innerKTiles) - scales_and_zeros = pack_tinygemm_scales_and_zeros(weight_qtensor.scale, weight_qtensor.zero_point) - groupsize = weight_qtensor.block_size[-1] - return torch.ops.aten._weight_int4pack_mm(input_tensor.contiguous(), packed_weight, groupsize, scales_and_zeros) - elif ( - is_cpu and - weight_is_int8 and - len(weight_qtensor.shape) == 2 and - len(weight_qtensor.block_size) == 2 and - weight_qtensor.block_size[0] == 1 and - weight_qtensor.block_size[1] == weight_qtensor.shape[1] - ): - # TODO: enable mps path as well - # per channel int8 weight only quantizated mm - return torch.ops.aten._weight_int8pack_mm(input_tensor.contiguous(), weight_qtensor.int_data, weight_qtensor.scale) - else: - weight_tensor = weight_qtensor.dequantize() - return torch.nn.functional.linear(input_tensor, weight_tensor, bias) - else: - if isinstance(input_tensor, AffineQuantizedTensor): - input_tensor = input_tensor.dequantize() - return torch.nn.functional.linear(input_tensor, weight_tensor, bias) - - -@implements_aqt_aten_ops([aten.mm.default, aten.addmm.default]) -def aten_mm(func, *args, **kwargs): - if not args[0].is_floating_point(): - raise NotImplementedError(f"{func} is not implemented for non floating point input") - - if func == aten.addmm.default: - assert args[1].shape[-1] == args[2].shape[0], ( - f"need mat1 shape: {args[1].shape} final" - f"dim to match mat2 shape: {args[2].shape} first dim " - ) - input_tensor, weight_qtensor, bias = ( - args[1], - args[2], - args[0], - ) - else: - assert args[0].shape[-1] == args[1].shape[0], ( - f"need mat1 shape: {args[0].shape} final dim" - f"to match mat2 shape: {args[1].shape} first dim" - ) - input_tensor, weight_qtensor, bias = ( - args[0], - args[1], - None if len(args) == 2 else args[2], - ) - weight_tensor = weight_qtensor.dequantize() - return func(input_tensor, weight_tensor, bias) - -@implements_aqt_aten_ops([aten.detach.default]) -def 
detach(func, *args, **kwargs): - return return_and_correct_aliasing( - func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) - ) - - -@implements_aqt_aten_ops([aten.clone.default]) -def clone(func, *args, **kwargs): - return return_and_correct_aliasing( - func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) - ) - - -@implements_aqt_aten_ops([aten._to_copy.default]) -def _to_copy(func, *args, **kwargs): - return return_and_correct_aliasing( - func, - args, - kwargs, - args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone), - ) - -@implements_aqt_aten_ops([aten.t.default]) -def t(func, *args, **kwargs): - # TODO: need to implement this - # args[0].transposed = not args[0].transposed - # new = args[0]._change_shape(args[0].shape[::-1]) - # return return_and_correct_aliasing(func, args, kwargs, new) - raise Exception("transpose not implemented yet") - - class LinearActQuantizedTensor(torch.Tensor): """ Applies activation quantization for linear operator @@ -1072,15 +636,8 @@ def __tensor_unflatten__( ) @classmethod - def from_float( - cls, - input_float, - input_quant_func, - ): - return cls( - input_float, - input_quant_func, - ) + def from_float(cls, input_float, input_quant_func): + return cls(input_float, input_quant_func) @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): @@ -1151,5 +708,4 @@ def __torch_dispatch__(cls, func, types, args, kwargs): f"LinearActQuantizedTensor dispatch: attempting to run {func}, this is not supported" ) -to_aqt = AffineQuantizedTensor.from_float -to_laqt = LinearActQuantizedTensor.from_float +to_laq = LinearActQuantizedTensor.from_float From 90b5e1797bfa2b252e2ec26dc1c10707034ec5bb Mon Sep 17 00:00:00 2001 From: Driss Guessous <32754868+drisspg@users.noreply.github.com> Date: Fri, 24 May 2024 15:44:01 -0700 Subject: [PATCH 54/61] Fix Readme and remove unused kernel (#270) * fix reamde and remove unused kernel * remove unused tests --------- --- README.md | 16 +++- test/test_ops.py | 15 ---- torchao/csrc/cuda/nms.cu | 181 --------------------------------------- torchao/csrc/nms.cpp | 8 -- torchao/ops.py | 21 +---- 5 files changed, 16 insertions(+), 225 deletions(-) delete mode 100644 torchao/csrc/cuda/nms.cu delete mode 100644 torchao/csrc/nms.cpp diff --git a/README.md b/README.md index 150c67b512..2328c67c8b 100644 --- a/README.md +++ b/README.md @@ -29,9 +29,23 @@ git clone https://github.com/pytorch/ao cd ao pip install -r requirements.txt pip install -r dev-requirements.txt -pip install . ``` +There are two options; +-If you plan to be developing the library run: +```Shell +python setup.py develop +``` + +If you want to install from source run +```Shell +python setup.py install +``` + +** Note: +Since we are building pytorch c++/cuda extensions by default, running `pip install .` will +not work. 
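Since a plain `pip install .` won't pick up the compiled extensions, one hypothetical smoke test after either `setup.py` invocation is simply importing the package and letting a missing extension surface (an illustrative check, not part of this patch):

```python
# Run from outside the repo checkout so Python resolves the installed package.
import torch
import torchao  # assumption: import typically fails here if the C++/CUDA build broke
```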
+ ### Quantization ```python diff --git a/test/test_ops.py b/test/test_ops.py index d73ae536ac..6ce6a4afba 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -30,21 +30,6 @@ def _create_tensors_with_iou(self, N, iou_thresh): scores = torch.rand(N) return boxes, scores - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.3 or lower") - def test_nms(self): - iou = 0.2 - boxes, scores = self._create_tensors_with_iou(1000, iou) - boxes = boxes.cuda() - scores = scores.cuda() - - # smoke test - _ = torchao.ops.nms(boxes, scores, iou) - - # comprehensive testing - test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"] - opcheck(torch.ops.torchao.nms, (boxes, scores, iou), test_utils=test_utils) - def _create_fp6_inputs(self, BS: int, OC: int, IC: int): # Randomly initialize each bytes. The highest value for randint() is set the the max value of uint32_t. fp6_weight = torch.randint(4294967295, (OC, IC // 16 * 3)).to(torch.int) diff --git a/torchao/csrc/cuda/nms.cu b/torchao/csrc/cuda/nms.cu deleted file mode 100644 index 5bbbff8d79..0000000000 --- a/torchao/csrc/cuda/nms.cu +++ /dev/null @@ -1,181 +0,0 @@ -#include -#include -#include -#include -#include - -namespace torchao { - -namespace { - -#define CUDA_1D_KERNEL_LOOP_T(i, n, index_t) \ - for (index_t i = (blockIdx.x * blockDim.x) + threadIdx.x; i < (n); \ - i += (blockDim.x * gridDim.x)) - -#define CUDA_1D_KERNEL_LOOP(i, n) CUDA_1D_KERNEL_LOOP_T(i, n, int) - -template -constexpr __host__ __device__ inline integer ceil_div(integer n, integer m) { - return (n + m - 1) / m; -} - -int const threadsPerBlock = sizeof(unsigned long long) * 8; - -template -__device__ inline bool devIoU( - T const* const a, - T const* const b, - const float threshold) { - T left = max(a[0], b[0]), right = min(a[2], b[2]); - T top = max(a[1], b[1]), bottom = min(a[3], b[3]); - T width = max(right - left, (T)0), height = max(bottom - top, (T)0); - using acc_T = at::acc_type; - acc_T interS = (acc_T)width * height; - acc_T Sa = ((acc_T)a[2] - a[0]) * (a[3] - a[1]); - acc_T Sb = ((acc_T)b[2] - b[0]) * (b[3] - b[1]); - return (interS / (Sa + Sb - interS)) > threshold; -} - -template -__global__ void nms_kernel_impl( - int n_boxes, - double iou_threshold, - const T* dev_boxes, - unsigned long long* dev_mask) { - const int row_start = blockIdx.y; - const int col_start = blockIdx.x; - - if (row_start > col_start) - return; - - const int row_size = - min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); - const int col_size = - min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); - - __shared__ T block_boxes[threadsPerBlock * 4]; - if (threadIdx.x < col_size) { - block_boxes[threadIdx.x * 4 + 0] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0]; - block_boxes[threadIdx.x * 4 + 1] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1]; - block_boxes[threadIdx.x * 4 + 2] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2]; - block_boxes[threadIdx.x * 4 + 3] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3]; - } - __syncthreads(); - - if (threadIdx.x < row_size) { - const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; - const T* cur_box = dev_boxes + cur_box_idx * 4; - int i = 0; - unsigned long long t = 0; - int start = 0; - if (row_start == col_start) { - start = threadIdx.x + 1; - } - for (i = start; i < col_size; i++) { - if 
(devIoU(cur_box, block_boxes + i * 4, iou_threshold)) { - t |= 1ULL << i; - } - } - const int col_blocks = ceil_div(n_boxes, threadsPerBlock); - dev_mask[cur_box_idx * col_blocks + col_start] = t; - } -} - -at::Tensor nms_kernel( - const at::Tensor& dets, - const at::Tensor& scores, - double iou_threshold) { - TORCH_CHECK(dets.is_cuda(), "dets must be a CUDA tensor"); - TORCH_CHECK(scores.is_cuda(), "scores must be a CUDA tensor"); - - TORCH_CHECK( - dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D"); - TORCH_CHECK( - dets.size(1) == 4, - "boxes should have 4 elements in dimension 1, got ", - dets.size(1)); - TORCH_CHECK( - scores.dim() == 1, - "scores should be a 1d tensor, got ", - scores.dim(), - "D"); - TORCH_CHECK( - dets.size(0) == scores.size(0), - "boxes and scores should have same number of elements in ", - "dimension 0, got ", - dets.size(0), - " and ", - scores.size(0)) - - at::cuda::CUDAGuard device_guard(dets.device()); - - if (dets.numel() == 0) { - return at::empty({0}, dets.options().dtype(at::kLong)); - } - - auto order_t = std::get<1>( - scores.sort(/*stable=*/true, /*dim=*/0, /* descending=*/true)); - auto dets_sorted = dets.index_select(0, order_t).contiguous(); - - int dets_num = dets.size(0); - - const int col_blocks = ceil_div(dets_num, threadsPerBlock); - - at::Tensor mask = - at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); - - dim3 blocks(col_blocks, col_blocks); - dim3 threads(threadsPerBlock); - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - dets_sorted.scalar_type(), "nms_kernel", [&] { - nms_kernel_impl<<>>( - dets_num, - iou_threshold, - dets_sorted.data_ptr(), - (unsigned long long*)mask.data_ptr()); - }); - - at::Tensor mask_cpu = mask.to(at::kCPU); - unsigned long long* mask_host = - (unsigned long long*)mask_cpu.data_ptr(); - - std::vector remv(col_blocks); - memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); - - at::Tensor keep = - at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); - int64_t* keep_out = keep.data_ptr(); - - int num_to_keep = 0; - for (int i = 0; i < dets_num; i++) { - int nblock = i / threadsPerBlock; - int inblock = i % threadsPerBlock; - - if (!(remv[nblock] & (1ULL << inblock))) { - keep_out[num_to_keep++] = i; - unsigned long long* p = mask_host + i * col_blocks; - for (int j = nblock; j < col_blocks; j++) { - remv[j] |= p[j]; - } - } - } - - AT_CUDA_CHECK(cudaGetLastError()); - return order_t.index( - {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) - .to(order_t.device(), keep.scalar_type())}); -} - -} // namespace - -TORCH_LIBRARY_IMPL(torchao, CUDA, m) { - m.impl("torchao::nms", &nms_kernel); -} - -} // namespace torchao diff --git a/torchao/csrc/nms.cpp b/torchao/csrc/nms.cpp deleted file mode 100644 index 5cc26d1593..0000000000 --- a/torchao/csrc/nms.cpp +++ /dev/null @@ -1,8 +0,0 @@ -#include -#include -#include - -TORCH_LIBRARY_FRAGMENT(torchao, m) { - m.impl_abstract_pystub("torchao.ops"); - m.def("nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor"); -} diff --git a/torchao/ops.py b/torchao/ops.py index fcc6ae9364..05a1668399 100644 --- a/torchao/ops.py +++ b/torchao/ops.py @@ -10,26 +10,6 @@ def decorator(func): return torch.library.impl_abstract(f"{name}")(func) return decorator -def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: - """ - See https://pytorch.org/vision/main/generated/torchvision.ops.nms.html - """ - return 
torch.ops.torchao.nms.default(boxes, scores, iou_threshold) - - -# Defines the meta kernel / fake kernel / abstract impl -@register_custom_op("torchao::nms") -def _(dets, scores, iou_threshold): - torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D") - torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got {dets.size(1)}") - torch._check(scores.dim() == 1, lambda: f"scores should be a 1d tensor, got {scores.dim()}") - torch._check( - dets.size(0) == scores.size(0), - lambda: f"boxes and scores should have same number of elements in dimension 0, got {dets.size(0)} and {scores.size(0)}", - ) - ctx = torch._custom_ops.get_ctx() - num_to_keep = ctx.create_unbacked_symint() - return dets.new_empty(num_to_keep, dtype=torch.long) def prepack_fp6_weight(fp6_weight: Tensor) -> Tensor: @@ -45,6 +25,7 @@ def prepack_fp6_weight(fp6_weight: Tensor) -> Tensor: return torch.ops.torchao.prepack_fp6_weight.default(fp6_weight) +# Defines the meta kernel / fake kernel / abstract impl @register_custom_op("torchao::prepack_fp6_weight") def _(fp6_weight): torch._check(fp6_weight.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp6_weight.dim()}D") From bea1927234f0ecc10f2553a52a16c20af25d145f Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Fri, 24 May 2024 17:40:30 -0700 Subject: [PATCH 55/61] Kernel docs (#274) --- torchao/kernel/intmm.py | 59 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 54 insertions(+), 5 deletions(-) diff --git a/torchao/kernel/intmm.py b/torchao/kernel/intmm.py index d2afa66a0a..8491a2ba6c 100644 --- a/torchao/kernel/intmm.py +++ b/torchao/kernel/intmm.py @@ -21,10 +21,24 @@ from torch._dynamo import is_compiling as dynamo_is_compiling from torch._higher_order_ops.out_dtype import out_dtype def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor: + """ + Performs a safe integer matrix multiplication, considering different paths for + torch.compile, cublas, and fallback cases. + + Args: + input (torch.Tensor): The input tensor of shape [i, j]. + mat2 (torch.Tensor): The matrix to multiply with, of shape [j, k]. + + Returns: + torch.Tensor: The result of the matrix multiplication. + + Raises: + AssertionError: If the tensors are not on the same device. + """ # torch.compile path if dynamo_is_compiling() or "FakeTensor" in input.__repr__(): return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2) - + # error checking for cublas path assert ( mat2.device == input.device @@ -39,13 +53,13 @@ def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor: and j_is_nonzero_multiple_of_8 and k_is_nonzero_multiple_of_8 ) - + if device_cpu or bad_dimensions_for_cublas: # fallback path return torch.matmul(input.cpu().to(torch.int32), mat2.cpu().to(torch.int32)).to( input.device.type ) - + # cublas paths if not mat2.is_contiguous(): # silently gives incorrect result without this mat2 = mat2.contiguous() @@ -58,18 +72,53 @@ def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor: return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2) else: def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor: + """ + Performs a fallback integer matrix multiplication for torch versions before 2.2. + + Args: + input (torch.Tensor): The input tensor of shape [i, j]. + mat2 (torch.Tensor): The matrix to multiply with, of shape [j, k]. + + Returns: + torch.Tensor: The result of the matrix multiplication in int32. 
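+
+        Note:
+            This fallback accumulates in float32, so results can differ from exact
+            int32 accumulation once individual dot products exceed 2**24, the point
+            where float32 can no longer represent every integer exactly.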
+ """ # We can improve on this by writing Triton code that works for older versions of Triton # that ship with 2.1 or 2.0. return torch.matmul(input.to(torch.float32), mat2.to(torch.float32)).to(torch.int32) -def int_matmul(a, b): +def int_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: + """ + Performs integer matrix multiplication using intmm_triton if available and autotuner is enabled, + otherwise falls back to safe_int_mm. + + Args: + a (torch.Tensor): The first matrix to multiply. + b (torch.Tensor): The second matrix to multiply. + + Returns: + torch.Tensor: The result of the matrix multiplication. + """ if intmm_triton is not None and AUTOTUNER_ENABLE: return torch.ops.torchao.int_matmul(a, b) return safe_int_mm(a, b) -def int_scaled_matmul(a, b, scales1): +def int_scaled_matmul(a: torch.Tensor, b: torch.Tensor, scales1: torch.Tensor) -> torch.Tensor: + """ + Performs scaled integer matrix multiplication. + + Args: + a (torch.Tensor): The first matrix to multiply. + b (torch.Tensor): The second matrix to multiply. + scales1 (torch.Tensor): The scaling factors for the rows of the result. + + Returns: + torch.Tensor: The result of the scaled matrix multiplication. + + Raises: + AssertionError: If the dimensions of the input tensors do not match the expected shapes. + """ M, K = a.shape K, N = b.shape assert M == scales1.size(0) From a7bc592c26c1a6fe7cdc808f8f6001fae1a16581 Mon Sep 17 00:00:00 2001 From: Mark Saroufim Date: Fri, 24 May 2024 22:26:23 -0700 Subject: [PATCH 56/61] Quantization Docstrings (#273) --- torchao/quantization/autoquant.py | 82 ++++++++++++++++++++---- torchao/quantization/quant_api.py | 28 +++++++- torchao/quantization/quant_primitives.py | 75 +++++++++++----------- torchao/quantization/smoothquant.py | 40 +++++++++--- torchao/quantization/unified.py | 18 ++++-- torchao/quantization/utils.py | 2 - torchao/quantization/weight_only.py | 52 +++++++++------ 7 files changed, 215 insertions(+), 82 deletions(-) diff --git a/torchao/quantization/autoquant.py b/torchao/quantization/autoquant.py index 808f7d89d3..9eeb146f55 100644 --- a/torchao/quantization/autoquant.py +++ b/torchao/quantization/autoquant.py @@ -16,8 +16,6 @@ except: from torch._inductor.runtime.runtime_utils import do_bench -from .utils import TORCH_VERSION_AFTER_2_4 - aten = torch.ops.aten AUTOQUANT_CACHE = {} @@ -28,10 +26,21 @@ def check_cache(cls, shapes_and_dtype): def update_cache(cls, shapes_and_dtype, res): AUTOQUANT_CACHE[(cls,)+shapes_and_dtype] = res +# TODO: Document the methods class AutoQuantizableLinearWeight(torch.Tensor): """ - when run, finds best type of quantization for this tensor and swaps itself with that + A subclass of torch.Tensor that, when run, finds the best type of quantization for itself and swaps + its data with the quantized version. + + Args: + weight (torch.Tensor): The initial weight tensor. + qtensor_class_list (list): A list of tensor classes to be considered for quantization. + *args: Additional positional arguments. + mode (list, optional): A list containing mode settings for quantization. The first element is the mode type + (e.g., "relu"), and the second element is the mode value (e.g., None). Defaults to ["relu", None]. + **kwargs: Additional keyword arguments. 
""" + @staticmethod def __new__(cls, weight, qtensor_class_list, *args, mode=["relu", None], **kwargs): kwargs["device"] = weight.device @@ -214,7 +223,18 @@ def _is_interpolate_mode(mode): class AQMixin(): """ - Mixin to turn normal quantized subclasses into autoquantizable ones + Tests and benchmarks the autoquantization process for the given activation matrix, weight, and bias. + + Args: + act_mat (torch.Tensor): The activation matrix. + weight (torch.Tensor): The weight tensor. + bias (torch.Tensor or None): The bias tensor. + best_time (float): The best time to beat for the quantization process. + mode (list, optional): A list containing mode settings for quantization. The first element is the mode type + (e.g., "relu"), and the second element is the mode value (e.g., None). Defaults to ["relu", None]. + + Returns: + float: The benchmarked time for the autoquantization process. """ @classmethod def _autoquant_test(cls, act_mat, weight, bias, best_time, mode=["relu", None]): @@ -237,6 +257,20 @@ class AQInt8DynamicallyQuantizedLinearWeight(AQMixin, Int8DynamicallyQuantizedLi """ @classmethod def _autoquant_test(cls, act_mat, weight, bias, best_time, mode=["relu", None]): + """ + Tests and benchmarks the autoquantization process with special handling for interpolate mode. + + Args: + act_mat (torch.Tensor): The activation matrix. + weight (torch.Tensor): The weight tensor. + bias (torch.Tensor or None): The bias tensor. + best_time (float): The best time to beat for the quantization process. + mode (list, optional): A list containing mode settings for quantization. The first element is the mode type + (e.g., "relu"), and the second element is the mode value (e.g., None). Defaults to ["relu", None]. + + Returns: + float: The benchmarked time for the autoquantization process. + """ if not _is_interpolate_mode(mode): return super()._autoquant_test(act_mat, weight, bias, best_time, mode) @@ -279,6 +313,17 @@ class AQWeightOnlyQuantizedLinearWeight2(Int8WeightOnlyQuantizedLinearWeight, AQ """ @staticmethod def _quantized_op(act_mat, w_qtensor, bias): + """ + Performs the quantized linear operations + + Args: + act_mat (torch.Tensor): The activation matrix. + w_qtensor (torch.Tensor): The quantized weight tensor. + bias (torch.Tensor or None): The bias tensor. + + Returns: + torch.Tensor: The result of the quantized operation. + """ orig_dtype = act_mat.dtype orig_shape = act_mat.shape act_mat = act_mat.reshape(-1, act_mat.shape[-1], 1) @@ -383,18 +428,33 @@ def change_autoquantizable_to_quantized(model, **kwargs): torch._dynamo.config.automatic_dynamic_shapes = hold torch._dynamo.reset() +# TODO: example_input seems weird to include in the API +# TODO: Document all the modes +# TODO: Mode being a list is weird, should be a string or some object @torch.no_grad() def autoquant(model, example_input=None, qtensor_class_list=DEFAULT_CLASS_LIST, filter_fn=None, mode=["interpolate", .85], **aq_kwargs): """ - wraps model in AutoQuantWrapper, if example_input is provided, runs forward on it, otherwise returns the wrapped model. - AutoQuantWrapper handles instances where model is torch.compiled by first performing autoquantization on the original - model and then letting the torch.compile run/tracing occur. - - Example usage:: - + Wraps the given model in an AutoQuantWrapper. If `example_input` is provided, performs a forward pass on the input. + Otherwise, returns the wrapped model. 
The AutoQuantWrapper manages cases where the model is torch-compiled by first + performing autoquantization on the original model and then allowing the torch.compile run/tracing to occur. + + Args: + model (torch.nn.Module): The model to be autoquantized. + example_input (Any, optional): An example input for the model. If provided, the function performs a forward pass + on this input. Defaults to None. + qtensor_class_list (list, optional): A list of tensor classes to be used for quantization. Defaults to DEFAULT_CLASS_LIST. + filter_fn (callable, optional): A filter function to apply to the model parameters. Defaults to None. + mode (list, optional): A list containing mode settings for quantization. The first element is the mode type (e.g., "interpolate"), + and the second element is the mode value (e.g., 0.85). Defaults to ["interpolate", .85]. + **aq_kwargs: Additional keyword arguments for the autoquantization process. + + Returns: + torch.nn.Module: The autoquantized and wrapped model. If `example_input` is provided, the function performs a forward pass + on the input and returns the result of the forward pass. + + Example usage: torchao.autoquant(torch.compile(model)) model(*example_input) - """ # the hook we will use to intercept the model forward and perform # autoquantization diff --git a/torchao/quantization/quant_api.py b/torchao/quantization/quant_api.py index 39a977dd00..d9b731bace 100644 --- a/torchao/quantization/quant_api.py +++ b/torchao/quantization/quant_api.py @@ -13,6 +13,10 @@ both because primitives were designed based on the fusions that come along with it and because that is how we access the intended quantized and mixed GEMM kernels + +TODO: There are 2 different approaches to quantizing a model. The first and more historically +popular approach is to use module swaps which explicitly change the linear modules and the second +approach is to instead use subclasses to change the interpretation of the linear module """ import torch @@ -51,6 +55,7 @@ "Int4WeightOnlyQuantizer", "quantize", "autoquant", + "_get_subclass_inserter", ] if TORCH_VERSION_AFTER_2_3: @@ -72,8 +77,17 @@ def _replace_with_custom_fn_if_matches_filter( cur_fqn="", ) -> None: """ - For each `child` in `model`, replaces it with `replacement_fn(child)` - if `filter_fn(child)` is `True` + Recursively replaces each child module in `model` with the result of `replacement_fn(child)` + if `filter_fn(child)` returns `True`. + + Args: + model (torch.nn.Module): The model containing modules to be replaced. + replacement_fn (Callable[[torch.nn.Module], torch.nn.Module]): The function to replace matching modules. + filter_fn (Callable[[torch.nn.Module], bool]): The filter function to determine which modules to replace. + cur_fqn (str, optional): The current fully qualified name of the module being processed. Defaults to "". + + Returns: + None """ if filter_fn(model, cur_fqn[:-1]): model = replacement_fn(model) @@ -125,6 +139,16 @@ def apply_dynamic_quant(model, filter_fn=None): import torch.nn.utils.parametrize as parametrize def _get_subclass_inserter(cls, enable_parametrization=False, **kwargs): + """ + Returns a function which inserts the given subclass into all linear modules + in the model. The inserted module will have its weight set to the result of + `cls(mod.weight, **kwargs)`. If parametrization is enabled then this will be done using + torch.nn.utils.parametrize instead of directly setting the attribute on the module. + + Args: + cls (torch.Tensor): The class to insert as a child module. 
+ kwargs (Any): Any additional arguments for the constructor. + """ constructor = kwargs.pop("constructor", "subclass_constructor") from_float = kwargs.pop("method", "from_float") def insert_subclass(lin): diff --git a/torchao/quantization/quant_primitives.py b/torchao/quantization/quant_primitives.py index bc2d44e576..d86966b48c 100644 --- a/torchao/quantization/quant_primitives.py +++ b/torchao/quantization/quant_primitives.py @@ -375,7 +375,6 @@ def choose_qparams_affine( # copy-pasta of https://www.internalfb.com/intern/anp/view/?id=3350736 - def dynamically_quantize_per_tensor( x, quant_min, @@ -401,8 +400,6 @@ def dynamically_quantize_per_tensor( # taken from # https://github.com/mit-han-lab/smoothquant/blob/2f87951dacfb9238d8d657f52ae83a82a3c9ba0c/smoothquant/fake_quant.py#L26 # and slightly modified - - def quantize_activation_per_token_absmax(t): # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1] mapping_type = MappingType.SYMMETRIC @@ -426,10 +423,12 @@ def quantize_activation_per_token_absmax(t): def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype): - # assumes symmetric quantization - # assumes axis == 0 - # assumes dense memory format - # TODO(future): relax ^ as needed + """ + assumes symmetric quantization + assumes axis == 0 + assumes dense memory format + TODO(future): relax ^ as needed + """ assert x.dim() == 2, "only support 2d Tensors" @@ -512,21 +511,22 @@ def quant_int8_matmul( w_scales, out_dtype=torch.float32, ): - # Quantized matmul of int8 operands that accumulates to int32 and returns - # out_dtype. For now, this is written for approximate numerical - # correctness, and things like aligning accumulation behaviors and - # performance optimizations are left for a future PR. - # Assumes that weight quantization is symmetric, i.e. w_zp is 0. - # Assumes that weight quantization is per-channel. - - # see - # https://github.com/google/gemmlowp/blob/master/doc/quantization.md - # for an overview of quantized matmul compute - - # in scalar form, assuming out_dtype is fp32 and zw == 0: - # - # Y_i_j_fp32 = sx * sw (dot(X_i, W_j) - zx * sum(W_j)) - # + """ + Quantized matmul of int8 operands that accumulates to int32 and returns + out_dtype. For now, this is written for approximate numerical + correctness, and things like aligning accumulation behaviors and + performance optimizations are left for a future PR. + Assumes that weight quantization is symmetric, i.e. w_zp is 0. + Assumes that weight quantization is per-channel. + + see + https://github.com/google/gemmlowp/blob/master/doc/quantization.md + for an overview of quantized matmul compute + + in scalar form, assuming out_dtype is fp32 and zw == 0: + + Y_i_j_fp32 = sx * sw (dot(X_i, W_j) - zx * sum(W_j)) + """ assert x_vals_int8.dtype in ( torch.uint8, @@ -571,8 +571,10 @@ def quant_int8_dynamic_per_token_linear( bias, out_dtype, ): - # like F.linear, but with int8 dynamic quantization of activation, - # and a quantized weight + """ + like F.linear, but with int8 dynamic quantization of activation, + and a quantized weight + """ x_vals_int8, x_scales = quantize_activation_per_token_absmax(x) mm_out = quant_int8_per_token_matmul( x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype @@ -589,20 +591,21 @@ def quant_int8_per_token_matmul( w_scales, output_dtype=torch.float32, ): - # Quantized matmul of int8 operands that accumulates to int32 and returns - # output_dtype. 
For now, this is written for approximate numerical
-    # Assumes that activation and weight quantization are symmetric,
-    # i.e. act_zp and w_zp is 0.
-    # Assumes that weight quantization is per-channel.
+    """
+    Quantized matmul of int8 operands that accumulates to int32 and returns
+    output_dtype. For now, this is written for approximate numerical correctness,
+    and things like aligning accumulation behaviors and performance optimizations
+    are left for a future PR.
+    Assumes that activation and weight quantization are symmetric,
+    i.e. act_zp and w_zp are 0.
+    Assumes that weight quantization is per-channel.
 
-    # see
-    # https://github.com/google/gemmlowp/blob/master/doc/quantization.md
-    # for an overview of quantized matmul compute
+    see
+    https://github.com/google/gemmlowp/blob/master/doc/quantization.md
+    for an overview of quantized matmul compute
 
-    # in scalar form, assuming output_dtype is fp32 and zw == 0:
-    #
-    # Y_i_j_fp32 = sx * sw dot(X_i, W_j)
-    #
+    in scalar form, assuming output_dtype is fp32 and zw == 0:
+
+    Y_i_j_fp32 = sx * sw dot(X_i, W_j)
+    """
 
     assert (
         x_vals_int8.dtype == torch.int8
diff --git a/torchao/quantization/smoothquant.py b/torchao/quantization/smoothquant.py
index 35b54382c0..dd81bada7e 100644
--- a/torchao/quantization/smoothquant.py
+++ b/torchao/quantization/smoothquant.py
@@ -34,12 +34,15 @@
 def get_scale(X_absmax, W_absmax, alpha=0.5):
     """
-    Calculate the scale based on abs(max(X)), abs(max(W)) and alpha
-    If X is of dimension `b*n*k` and W is dimension `k*m`, the returned
-    scale is of dimension `k`.
-    Note: X_absmax is calculated outside of this function because we
-    need to keep a running version of it during calibration. W_absmax
-    is calculated outside of this function for consistency with X_absmax.
+    Calculate the scale based on abs(max(X)), abs(max(W)), and alpha.
+
+    Args:
+        X_absmax (torch.Tensor): Absolute maximum values of the input tensor X.
+        W_absmax (torch.Tensor): Absolute maximum values of the weight tensor W.
+        alpha (float, optional): Scaling factor. Defaults to 0.5.
+
+    Returns:
+        torch.Tensor: The calculated scale of dimension `k` if X is of dimension `b*n*k` and W is of dimension `k*m`.
     """
     X_pow = torch.pow(X_absmax, alpha)
     W_pow = torch.pow(W_absmax, 1.0 - alpha)
@@ -210,6 +213,18 @@ def set_debug_x_absmax(self):
 def swap_linear_with_smooth_fq_linear(
     model, skip_fqn_list=None, cur_fqn="", alpha=0.5
 ) -> None:
+    """
+    Replaces linear layers in the model with their SmoothFakeDynamicallyQuantizedLinear equivalents.
+
+    Args:
+        model (torch.nn.Module): The model containing linear layers to be replaced.
+        skip_fqn_list (list of str, optional): List of fully qualified names to skip during replacement. Defaults to None.
+        cur_fqn (str, optional): The current fully qualified name of the module being processed. Defaults to "".
+        alpha (float, optional): The scaling factor for SmoothQuant. Defaults to 0.5.
+
+    Returns:
+        None
+    """
     name_to_child = dict(model.named_children())
     for name, child in name_to_child.items():
@@ -228,6 +243,17 @@ def swap_linear_with_smooth_fq_linear(
 def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None:
+    """
+    Prepares the model for inference by calculating the SmoothQuant scale for each SmoothFakeDynamicallyQuantizedLinear layer.
+
+    Args:
+        model (torch.nn.Module): The model containing SmoothFakeDynamicallyQuantizedLinear layers.
+        debug_skip_calibration (bool, optional): If True, sets the running maximum of activations to a debug value for performance benchmarking.
+            Defaults to False.
+ + Returns: + None + """ for _, mod in model.named_modules(): if isinstance(mod, tuple(source_cls_to_target_cls.values())): if debug_skip_calibration: @@ -237,8 +263,6 @@ def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None: # useful for quickly toggling smoothquant debug settings on all smoothquant # modules in a model - - def set_smooth_fq_attribute(model, attribute_name, new_attribute_val): for _, mod in model.named_modules(): if isinstance(mod, tuple(source_cls_to_target_cls.values())): diff --git a/torchao/quantization/unified.py b/torchao/quantization/unified.py index 16112ac0f0..7da915dec7 100644 --- a/torchao/quantization/unified.py +++ b/torchao/quantization/unified.py @@ -1,9 +1,19 @@ import torch from typing import Any +from abc import ABC, abstractmethod + +""" +The vast majority of quantization algorithms follow one of two patterns +1. Single quantize call to create a quantized model with quantized state_dict +2. Flow that needs calibration or training + +This file defines the API for both patterns +""" + -############################# Unified Quantization APIs ############################## # API 1, single quantize call to create a quantized model with quantized state_dict -class Quantizer: +class Quantizer(ABC): + @abstractmethod def quantize( self, model: torch.nn.Module, *args: Any, **kwargs: Any ) -> torch.nn.Module: @@ -13,6 +23,7 @@ def quantize( # API 2, flow that needs calibration or training class TwoStepQuantizer: + @abstractmethod def prepare( self, model: torch.nn.Module, *args: Any, **kwargs: Any ) -> torch.nn.Module: @@ -24,6 +35,3 @@ def convert( ) -> torch.nn.Module: pass - - -############################# Unified Quantization APIs ############################## diff --git a/torchao/quantization/utils.py b/torchao/quantization/utils.py index 948c1357c8..74cb7deb20 100644 --- a/torchao/quantization/utils.py +++ b/torchao/quantization/utils.py @@ -138,8 +138,6 @@ def unwrap_tensor_subclass(model, filter_fn=None): # https://discuss.pytorch.org/t/finding-model-size/130275 - - def get_model_size_in_bytes(model): s = 0 for p in model.parameters(): diff --git a/torchao/quantization/weight_only.py b/torchao/quantization/weight_only.py index 099df0f17f..bb6a0136ef 100644 --- a/torchao/quantization/weight_only.py +++ b/torchao/quantization/weight_only.py @@ -5,19 +5,34 @@ # LICENSE file in the root directory of this source tree. import torch - from .quant_primitives import dynamically_quantize_per_channel __all__ = ["WeightOnlyInt8QuantLinear"] - class WeightOnlyInt8QuantLinear(torch.nn.Linear): """ This class is a replacement for `torch.nn.Linear`. It implements a - mixed dtype matmul using int8 symmetric per-channel weight quantization + mixed dtype matrix multiplication using int8 symmetric per-channel weight quantization. + + The primary goal of this class is to leverage int8 quantization for weights to reduce the + memory footprint and computational requirements while performing linear transformations. + This can be particularly beneficial for deploying models in low latency environments + + Attributes: + w_int8 (torch.Tensor): The quantized weights in int8 format. + scales (torch.Tensor): The scaling factors for each channel to convert the quantized + weights back to floating point format during the forward pass. """ def __init__(self, *args, **kwargs): + """ + Initializes the WeightOnlyInt8QuantLinear module. + + Args: + *args: Variable length argument list for `torch.nn.Linear`. + **kwargs: Arbitrary keyword arguments. 
+ Must include 'w_int8' (int8 quantized weights) and 'scales' (scaling factors). + """ w_int8 = kwargs.pop("w_int8") scales = kwargs.pop("scales") super().__init__(*args, **kwargs) @@ -25,21 +40,20 @@ def __init__(self, *args, **kwargs): self.register_buffer("w_int8", w_int8) self.register_buffer("scales", scales) - def forward(self, x, *args, **kwargs): + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: """ - Performs the forward pass of the quantized linear layer which consists - ofmixed dtype matmul using int8 symmetric per-channel weight quantization + Performs the forward pass of the quantized linear layer, which consists of + mixed dtype matrix multiplication using int8 symmetric per-channel weight quantization. Args: - X (torch.Tensor): The input floating point tensor to the quantized linear layer. + x (torch.Tensor): The input floating point tensor to the quantized linear layer. + *args: Additional positional arguments. + **kwargs: Additional keyword arguments. Returns: - torch.Tensor: The output floating point tensor after the quantized matmul and rescale. - + torch.Tensor: The output floating point tensor after the quantized matrix multiplication + and rescale. """ - # if len(x.shape)<=2: - # y = torch.mm(x, self.w_int8.to(x.dtype)) * self.scales - # else: # turn x into 2d tensor, then undo it for y x_view = x.view(-1, x.shape[-1]) y = torch.mm(x_view, self.w_int8.to(x.dtype)) * self.scales y = y.reshape(*x.shape[:-1], -1) @@ -48,23 +62,25 @@ def forward(self, x, *args, **kwargs): return y @classmethod - def from_float(cls, mod): + def from_float(cls, mod: torch.nn.Linear): """ - Converts a `mod` of class `torch.nn.Linear` to the - `WeightOnlyInt8QuantLinear` class + Converts a `torch.nn.Linear` module to a `WeightOnlyInt8QuantLinear` module. + + This method performs the conversion by dynamically quantizing the weights of the original + floating point linear layer to int8 format and creating a new `WeightOnlyInt8QuantLinear` + instance with these quantized weights and the corresponding scaling factors. Args: mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert. Returns: - WeightOnlyInt8QuantLinear: The converted quantized linear module. - + WeightOnlyInt8QuantLinear: The converted quantized linear module with int8 weights. 
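+
+        Example (a minimal sketch):
+            lin = torch.nn.Linear(128, 256)
+            qlin = WeightOnlyInt8QuantLinear.from_float(lin)
+            y = qlin(torch.randn(4, 128))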
""" w_fp32 = mod.weight w_int8, scales, _zp = dynamically_quantize_per_channel( w_fp32, -128, 127, torch.int8 ) - # create the new module with a toy size to ensure initialization is fast + # Create the new module with a toy size to ensure initialization is fast fake_in_features, fake_out_features = 8, 8 new_mod = cls( fake_in_features, From 4ca3985be603e6496da7ec57adf1942c8b32a78e Mon Sep 17 00:00:00 2001 From: Thien Tran Date: Sun, 26 May 2024 02:17:18 +0800 Subject: [PATCH 57/61] Improve primitives for FP6 quant (#248) --- dev-requirements.txt | 3 + docs/source/api_ref_dtypes.rst | 2 + setup.py | 3 +- test/dtypes/test_float6_e3m2.py | 127 +++++++++ test/test_ops.py | 34 +-- torchao/__init__.py | 13 +- torchao/csrc/cuda/fp6_llm/weight_quant.cu | 69 +---- torchao/csrc/fp6_llm/float6_e3m2.cpp | 319 ++++++++++++++++++++++ torchao/csrc/fp6_llm/fp6_llm.cpp | 8 +- torchao/dtypes/__init__.py | 3 + torchao/dtypes/float6_e3m2.py | 178 ++++++++++++ torchao/ops.py | 37 +-- 12 files changed, 679 insertions(+), 117 deletions(-) create mode 100644 test/dtypes/test_float6_e3m2.py create mode 100644 torchao/csrc/fp6_llm/float6_e3m2.cpp create mode 100644 torchao/dtypes/float6_e3m2.py diff --git a/dev-requirements.txt b/dev-requirements.txt index 6dadb274aa..156e8766d2 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -12,3 +12,6 @@ pandas # Custom CUDA Extensions ninja + +# for FP6-LLM (can be removed once we remove fp16_to_fp6_original()) +qtorch diff --git a/docs/source/api_ref_dtypes.rst b/docs/source/api_ref_dtypes.rst index 4cb797beb4..36c3c9b4eb 100644 --- a/docs/source/api_ref_dtypes.rst +++ b/docs/source/api_ref_dtypes.rst @@ -12,6 +12,8 @@ torchao.dtypes to_nf4 UInt4Tensor + to_float6_e3m2 + from_float6_e3m2 .. _NF4Tensor - add after fixing torchao/dtypes/nf4tensor.py:docstring diff --git a/setup.py b/setup.py index 5d1f32da2b..65ec21e15f 100644 --- a/setup.py +++ b/setup.py @@ -46,11 +46,12 @@ def get_extensions(): use_cuda = torch.cuda.is_available() and CUDA_HOME is not None extension = CUDAExtension if use_cuda else CppExtension - extra_link_args = [] + extra_link_args = ["-fopenmp"] extra_compile_args = { "cxx": [ "-O3" if not debug_mode else "-O0", "-fdiagnostics-color=always", + "-fopenmp", ], "nvcc": [ "-O3" if not debug_mode else "-O0", diff --git a/test/dtypes/test_float6_e3m2.py b/test/dtypes/test_float6_e3m2.py new file mode 100644 index 0000000000..b821504731 --- /dev/null +++ b/test/dtypes/test_float6_e3m2.py @@ -0,0 +1,127 @@ +import torch +from torch.testing._internal.common_utils import ( + TestCase, + instantiate_parametrized_tests, + parametrize, + run_tests, +) +from torchao.dtypes.float6_e3m2 import to_float6_e3m2, from_float6_e3m2 + + +_DTYPES = [torch.float32, torch.float16, torch.bfloat16] +_DEVICES = ["cpu"] + (["cuda"] if torch.cuda.is_available() else []) + + +class TestFp6(TestCase): + + @parametrize("device", _DEVICES) + @parametrize("dtype", _DTYPES) + @parametrize( + "input_output", + [ + (0.0, 0b000000), # exact values + (1.0, 0b001100), # normal numbers + (1.25, 0b001101), + (28.0, 0b011111), # max + (0.1875, 0b000011), # subnormal number + (0.0625, 0b000001), # min + (29.0, 0b011111), # normal round down + (26.0, 0b011110), # normal round to nearest even + (0.1251, 0b000010), # subnormal round down + (0.0314, 0b000001), # subnormal round up + (0.03, 0b000000), # underflow + ], + ) + def test_to_float6_e3m2_no_bit_packing_correctness(self, device, dtype, input_output): + input, output = input_output + input = torch.tensor(input, device=device, 
dtype=dtype) + assert to_float6_e3m2(input, no_bit_packing=True).item() == output + + @parametrize("device", _DEVICES) + @parametrize("dtype", _DTYPES) + def test_to_float6_e3m2_bit_packing_correctness(self, device, dtype): + x = torch.randn(128, 128, device=device, dtype=dtype) + results_unpacked = to_float6_e3m2(x, no_bit_packing=True) + results_packed = to_float6_e3m2(x) + + val0, val1, val2, val3 = results_unpacked.unflatten(-1, (-1, 4)).unbind(-1) + bits0 = (val0 << 2) | (val1 >> 4) # 0000 0011 + bits1 = (val1 << 4) | (val2 >> 2) # 1111 2222 + bits2 = (val2 << 6) | (val3); # 2233 3333 + + expected_packed = torch.stack([bits0, bits1, bits2], dim=-1).flatten(-2) + assert (results_packed == expected_packed).all() + + @parametrize("device", _DEVICES) + @parametrize("shape", [(), (0,), (10,), (20, 20)]) + def test_to_float6_e3m2_no_bit_packing_shape(self, device, shape): + x = torch.randn(shape, device=device) + result = to_float6_e3m2(x, no_bit_packing=True) + assert result.shape == shape + + @parametrize("device", _DEVICES) + @parametrize("shape", [(4,), (20, 20)]) + def test_to_float6_e3m2_bit_packing_shape(self, device, shape): + x = torch.randn(shape, device=device) + result = to_float6_e3m2(x) + assert result.shape == shape[:-1] + (shape[-1] // 4 * 3,) + + @parametrize("device", _DEVICES) + @parametrize("dtype", _DTYPES) + @parametrize("no_bit_packing", [False, True]) + def test_to_float6_e3m2_compile(self, device, dtype, no_bit_packing): + x = torch.randn(20, 20, device=device, dtype=dtype) + expected = to_float6_e3m2(x, no_bit_packing=no_bit_packing) + + to_float6_e3m2_compiled = torch.compile(to_float6_e3m2) + actual = to_float6_e3m2_compiled(x, no_bit_packing=no_bit_packing) + torch.testing.assert_close(actual, expected) + + @parametrize("device", _DEVICES) + @parametrize( + "input_output", + [ + (0b000000, 0.0), + (0b001100, 1.0), + (0b011111, 28.0), # max + (0b000001, 0.0625), # min + (0b001110, 1.5), + (0b000011, 0.1875), # subnormal + ], + ) + def test_from_float6_e3m2_no_bit_packing_correctness(self, device, input_output): + input, output = input_output + input = torch.tensor(input, device=device, dtype=torch.uint8) + assert from_float6_e3m2(input, no_bit_packing=True).item() == output + + @parametrize("device", _DEVICES) + def test_from_float6_e3m2_bit_packing_correctness(self, device): + x = torch.randint(256, (128, 128 // 4 * 3), device=device, dtype=torch.uint8) + actual = from_float6_e3m2(x) + + bits0, bits1, bits2 = x.unflatten(-1, (-1, 3)).unbind(-1) + x_unpacked0 = bits0 >> 2 + x_unpacked1 = ((bits0 & 0x3) << 4) | (bits1 >> 4) + x_unpacked2 = ((bits1 & 0xF) << 2) | (bits2 >> 6) + x_unpacked3 = bits2 & 0x3F + + x_unpacked = torch.stack([x_unpacked0, x_unpacked1, x_unpacked2, x_unpacked3], dim=-1).flatten(-2) + expected = from_float6_e3m2(x_unpacked, no_bit_packing=True) + torch.testing.assert_close(actual, expected) + + @parametrize("device", _DEVICES) + @parametrize("no_bit_packing", [False, True]) + def test_from_float6_e3m2_compile(self, device, no_bit_packing): + x = torch.randint(256, size=(20, 15), device=device, dtype=torch.uint8) + expected = from_float6_e3m2(x, no_bit_packing=no_bit_packing) + + from_float6_e3m2_compiled = torch.compile(from_float6_e3m2) + actual = from_float6_e3m2_compiled(x, no_bit_packing=no_bit_packing) + torch.testing.assert_close(actual, expected) + + +instantiate_parametrized_tests(TestFp6) + + +if __name__ == "__main__": + run_tests() diff --git a/test/test_ops.py b/test/test_ops.py index 6ce6a4afba..4e463b4e26 100644 --- 
a/test/test_ops.py +++ b/test/test_ops.py @@ -50,24 +50,21 @@ def test_prepack_fp6_weight(self): opcheck(torch.ops.torchao.prepack_fp6_weight, (fp6_weight,), test_utils=test_utils) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_fp16_to_fp6(self): + def test_fp16_to_fp6_original(self): OC = 256 IC = 256 - - # in this fp6, we use 3 bits for exponent and 2 bits for mantissa - # also, we don't have nan/inf - fp6_absmax = 28.0 # 2 ** (0b111 - 0b011) * (1 + 0.5 + 0.25), where E=111, M=11 - fp6_absmin = 0.0625 # 2 ** (-0b010) * 0.25, where E=000, M=01 (subnormal number) fp16_weight = torch.randn((OC, IC), dtype=torch.float16) - fp16_weight.clip_(-fp6_absmax, fp6_absmax) - fp16_weight[fp16_weight.abs() < fp6_absmin] = 0 + + # the original FP16->FP6 kernel checks for overflow/underflow + fp16_weight.clip_(-28.0, 28.0) + fp16_weight[fp16_weight.abs() < 0.0625] = 0.0 # smoke test - torchao.ops.fp16_to_fp6(fp16_weight) + torchao.ops.fp16_to_fp6_original(fp16_weight) # comprehensive testing test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"] - opcheck(torch.ops.torchao.fp16_to_fp6, (fp16_weight,), test_utils=test_utils) + opcheck(torch.ops.torchao.fp16_to_fp6_original, (fp16_weight,), test_utils=test_utils) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") def test_fp16act_fp6weight_linear(self): @@ -89,19 +86,6 @@ def test_fp16act_fp6weight_linear(self): test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"] opcheck(torch.ops.torchao.fp16act_fp6weight_linear, (act_cuda, weight_cuda, scale_cuda, splitK), test_utils=test_utils) - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_fp6_weight_dequant(self): - OC = 256 - IC = 256 - fp6_weight, fp16_scale, _ = self._create_fp6_inputs(0, OC, IC) - - # smoke test - torchao.ops.fp6_weight_dequant(fp6_weight, fp16_scale) - - # comprehensive testing - test_utils = ["test_schema", "test_autograd_registration", "test_faketensor", "test_aot_dispatch_dynamic"] - opcheck(torch.ops.torchao.fp6_weight_dequant, (fp6_weight, fp16_scale), test_utils=test_utils) - # adapted from https://github.com/usyd-fsalab/fp6_llm/blob/main/tests/python/kernel_test.py @parameterized.expand([(1, 2048, 4096, 5), (2, 8192, 8192, 6)]) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") @@ -115,8 +99,8 @@ def test_fp6_matmul_correctness(self, BS, OC, IC, splitK): results_fp6 = torchao.ops.fp16act_fp6weight_linear(act_cuda, weight_cuda, scale_cuda, splitK) - fp16_weight = torchao.ops.fp6_weight_dequant(fp6_weight, fp16_scale).cuda() - results_fp16 = act_cuda @ fp16_weight.T + fp16_weight = torchao.dtypes.from_float6_e3m2(fp6_weight.view(torch.uint8), dtype=torch.float16) * fp16_scale[:, None] + results_fp16 = act_cuda @ fp16_weight.cuda().T error = (results_fp6 - results_fp16).abs() relative_error = error / results_fp16.abs() diff --git a/torchao/__init__.py b/torchao/__init__.py index c982e09a0c..c8f04c1d9e 100644 --- a/torchao/__init__.py +++ b/torchao/__init__.py @@ -1,9 +1,3 @@ -from torchao.quantization import ( - apply_weight_only_int8_quant, - apply_dynamic_quant, - autoquant, -) -from . import dtypes import torch _IS_FBCODE = ( hasattr(torch._utils_internal, "IS_FBSOURCE") and @@ -14,6 +8,13 @@ from . import _C from . import ops +from torchao.quantization import ( + apply_weight_only_int8_quant, + apply_dynamic_quant, + autoquant, +) +from . 
import dtypes + __all__ = [ "dtypes", "apply_dynamic_quant", diff --git a/torchao/csrc/cuda/fp6_llm/weight_quant.cu b/torchao/csrc/cuda/fp6_llm/weight_quant.cu index d29f70be0c..b519cbfb0d 100644 --- a/torchao/csrc/cuda/fp6_llm/weight_quant.cu +++ b/torchao/csrc/cuda/fp6_llm/weight_quant.cu @@ -13,7 +13,6 @@ // limitations under the License. // // This file is adapted from https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_quant.h -// and https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_dequant.h #include #include @@ -120,41 +119,6 @@ void weight_prepacking_fp16_to_fp6(uint16_t* weight_16bit, } } -void DeQuantMatrix_FP6_To_FP16(half* A_16bit_h, unsigned char* A_6bit_h, size_t M, size_t K, half* scale) { - assert(M%64==0); // Currently, M must be a multiple of 64. - assert(K%64==0); // Currently, K must be a multiple of 64. - size_t TotalSizeInByte = M*K*6/8; - // - half* OutPTR = A_16bit_h; - for(size_t i=0; i>2)&0x1f); - unsigned char B2 = (A_6bit_h[i*3+0]<<6) | ((A_6bit_h[i*3+1]>>2)&0xfc); - B2 = (B2&0x80) | ((B2>>2)&0x1f); - unsigned char B3 = (A_6bit_h[i*3+1]<<4) | ((A_6bit_h[i*3+2]>>4)&0xfc); - B3 = (B3&0x80) | ((B3>>2)&0x1f); - unsigned char B4 = A_6bit_h[i*3+2]<<2; - B4 = (B4&0x80) | ((B4>>2)&0x1f); - half FP1, FP2, FP3, FP4; - unsigned char *PTR1, *PTR2, *PTR3, *PTR4; - PTR1 = reinterpret_cast(&FP1); - PTR2 = reinterpret_cast(&FP2); - PTR3 = reinterpret_cast(&FP3); - PTR4 = reinterpret_cast(&FP4); - PTR1[0] = 0; PTR1[1] = B1; // small endian for X86 CPU - PTR2[0] = 0; PTR2[1] = B2; - PTR3[0] = 0; PTR3[1] = B3; - PTR4[0] = 0; PTR4[1] = B4; - OutPTR[0] = __float2half_rn ( __half2float(FP1) * 4096.0f * __half2float(scale[(4*i)/K]) ); - OutPTR[1] = __float2half_rn ( __half2float(FP2) * 4096.0f * __half2float(scale[(4*i)/K]) ); - OutPTR[2] = __float2half_rn ( __half2float(FP3) * 4096.0f * __half2float(scale[(4*i)/K]) ); - OutPTR[3] = __float2half_rn ( __half2float(FP4) * 4096.0f * __half2float(scale[(4*i)/K]) ); - // - OutPTR +=4; - } -} - - #include #include #include @@ -162,7 +126,7 @@ void DeQuantMatrix_FP6_To_FP16(half* A_16bit_h, unsigned char* A_6bit_h, size_t namespace torchao { // https://github.com/microsoft/DeepSpeed/blob/0fc19b6a320cf8aa0a5f6c2b1fa310bae9a70d94/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp#L194 -at::Tensor fp16_to_fp6_cpu(at::Tensor fp16_tensor) +at::Tensor fp16_to_fp6_original_cpu(at::Tensor fp16_tensor) { TORCH_CHECK(fp16_tensor.dim() == 2, "weight must be 2-dimensional"); TORCH_CHECK(fp16_tensor.scalar_type() == torch::kFloat16, "weight must be FP16"); @@ -183,37 +147,8 @@ at::Tensor fp16_to_fp6_cpu(at::Tensor fp16_tensor) return packed_fp6_tensor; } -/* - * Dequant a FP6 matrix to a equivalent FP16 matrix using CPUs. - * A useful tool to construct input matrices for the FP16 GEMM baseline. - * [Input] - * fp6_tensor: int tensor of shape [OC, IC // 16 * 3]; // 3 INT32 words contains 16 FP6 weights. - * fp16_scale: half tensor of shape [OC]; // for row-wise quantization. - * [Output] - * fp16_tensor: half tensor of shape [OC, IC]. 
- */ -at::Tensor weight_matrix_dequant_cpu(at::Tensor fp6_tensor, at::Tensor fp16_scale) -{ - int OC = fp6_tensor.size(0); - TORCH_CHECK(fp6_tensor.size(1) % 3 == 0); - int IC = fp6_tensor.size(1) / 3 * 16; - TORCH_CHECK(fp16_scale.size(0) == OC); - // - auto fp6_tensor_ptr = reinterpret_cast(fp6_tensor.data_ptr()); - auto fp16_scale_ptr = reinterpret_cast(fp16_scale.data_ptr()); - // - auto options = at::TensorOptions().dtype(at::kHalf).device(fp16_scale.device()); - at::Tensor fp16_tensor = at::empty({OC, IC}, options); - auto fp16_tensor_ptr = reinterpret_cast(fp16_tensor.data_ptr()); - // - DeQuantMatrix_FP6_To_FP16(fp16_tensor_ptr, fp6_tensor_ptr, OC, IC, fp16_scale_ptr); - // - return fp16_tensor; -} - TORCH_LIBRARY_IMPL(torchao, CPU, m) { - m.impl("torchao::fp16_to_fp6", &fp16_to_fp6_cpu); - m.impl("torchao::fp6_weight_dequant", &weight_matrix_dequant_cpu); + m.impl("torchao::fp16_to_fp6_original", &fp16_to_fp6_original_cpu); } } diff --git a/torchao/csrc/fp6_llm/float6_e3m2.cpp b/torchao/csrc/fp6_llm/float6_e3m2.cpp new file mode 100644 index 0000000000..16d71f51d9 --- /dev/null +++ b/torchao/csrc/fp6_llm/float6_e3m2.cpp @@ -0,0 +1,319 @@ +#include +#include +#include + +#include +#include +#include + + +class float6_e3m2_nan_inf : public std::invalid_argument { +public: + float6_e3m2_nan_inf() : std::invalid_argument("Encounter +/-inf or NaN, which is not representable in float6_e3m2.") { } +}; + +class float6_e3m2_overflow : public std::invalid_argument { +public: + float6_e3m2_overflow() : std::invalid_argument("float6_e3m2 overflow. float6_e3m2 cannot represent +/-inf. Make sure input < 30.0") { } +}; + +// we need to do this because C++17 does not allow using struct as template non-type parameter +// use the upper 16 bits for num exponent, lower 16 bits for num mantissa +static constexpr uint32_t encode_fp_spec(uint32_t n_exp, uint32_t n_man) { return (n_exp << 16u) | n_man; } +static constexpr uint32_t FP32_SPEC = encode_fp_spec(8u, 23u); +static constexpr uint32_t FP16_SPEC = encode_fp_spec(5u, 10u); +static constexpr uint32_t BF16_SPEC = encode_fp_spec(8u, 7u); + +// NOTE: only works for len < 32 +static constexpr uint32_t ones_mask(uint32_t len) { return (1u << len) - 1u; } + +// inspired by __internal_float2half() and float2half() from "cuda_fp16.hpp" +template +static uint8_t to_float6_e3m2_bits(T bits_) { + constexpr uint32_t N_EXP = FP_SPEC >> 16u; + constexpr uint32_t N_MAN = FP_SPEC & ones_mask(16u); + constexpr uint32_t N_EXP_MAN = N_EXP + N_MAN; + constexpr uint32_t EXP_BIAS_DIFF = ones_mask(N_EXP - 1u) - 3u; + + // sanity checks. will be removed in template instantiation. + // minimum 1 bit above FP6 (3 exponent bits and 2 mantissa bits) to avoid edge cases. 
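+    // Worked example for the FP32 path below: 1.25f has bits 0x3FA0'0000, i.e.
+    // sign=0, biased exponent 127, top two mantissa bits 01. Rebiasing the
+    // exponent from 127 to 3 and keeping two mantissa bits yields 0b001101,
+    // the value expected in test/dtypes/test_float6_e3m2.py.
+    // The static_asserts below enforce the sanity checks described above.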
+ static_assert(N_EXP >= 4, "Number of exponent bits must be >= 4."); + static_assert(N_MAN >= 3, "Number of mantissa bits must be >= 3."); + + uint32_t bits = bits_; // bit extension + uint32_t sign = bits >> N_EXP_MAN << 5u; + bits &= ones_mask(N_EXP_MAN); // clear sign bit + uint32_t result, remainder; + + // all exponent bits are 1s + if (bits >= (ones_mask(N_EXP) << N_MAN)) throw float6_e3m2_nan_inf(); + + // max FP6 (28) + half of least significand (2) = 30 (assume N_MAN >= 3) + if (bits >= (((EXP_BIAS_DIFF + 7u) << N_MAN) | (0x7u << (N_MAN - 3u)))) throw float6_e3m2_overflow(); + + // FP6 normal number (E>=001) + if (bits >= ((EXP_BIAS_DIFF + 1u) << N_MAN)) { + remainder = bits << (32u - (N_MAN - 2u)); // shift the truncated bits to most significant position + bits -= (EXP_BIAS_DIFF << N_MAN); // update exponent + result = sign | (bits >> (N_MAN - 2u)); + } + // FP6 subnormal number (more than half of min FP6 subnormal = 0.0625 * 0.5) + else if (bits > ((EXP_BIAS_DIFF - 2u) << N_MAN)) { + uint32_t exp = bits >> N_MAN; + uint32_t man = bits & ones_mask(N_MAN); + + // to make subnormal FP6 from normal FP16 + // step 1: add implicit 1 to mantissa + man |= (1u << N_MAN); + + // step 2: shift mantissa right so that exponent value is equal to + // exponent value of FP6 subnormal, which is -2 (equivalent to E=001) + uint32_t shift = EXP_BIAS_DIFF + 1u - exp; + remainder = man << (32u - (N_MAN - 2u + shift)); // shift the truncated bits to most significant position + result = sign | (man >> (shift + (N_MAN - 2u))); // implicit E=000 + } + // FP6 underflow. E=000, M=00 + else { + remainder = 0u; + result = sign; + } + + // round to nearest even + if ((remainder > 0x8000'0000u) || ((remainder == 0x8000'0000u) && (result & 0x1u))) { + result += 1; + } + return result; +} + +// assume the lower 6 bits contain the data. +template +static T from_float6_e3m2_bits(uint8_t a) { + constexpr uint32_t N_EXP = FP_SPEC >> 16u; + constexpr uint32_t N_MAN = FP_SPEC & ones_mask(16u); + constexpr uint32_t N_EXP_MAN = N_EXP + N_MAN; + constexpr uint32_t EXP_BIAS_DIFF = ones_mask(N_EXP - 1u) - 3u; + + uint32_t bits = a; // bit extension + uint32_t sign = bits >> 5u; + uint32_t exp = (bits >> 2u) & 0x7u; + uint32_t man = bits & 0x3u; + + if (exp > 0u) { // FP6 normal numbers + exp += EXP_BIAS_DIFF; + } else if (man > 0u) { // FP6 denormal numbers + uint32_t shift = (man >= 0b10u) ? 1u : 2u; + man = (man << shift) & 0x3u; // shift and remove explicit 1 + exp = 1u + EXP_BIAS_DIFF - shift; + } + // don't need to handle zero, since E=000 and M=00 + + uint32_t result = (sign << N_EXP_MAN) | (exp << N_MAN) | (man << (N_MAN - 2u)); + return static_cast(result); +} + +namespace torchao { + +template void to_float6_e3m2_unpacked_cpu_impl(const T *bits_ptr, uint8_t *fp6_ptr, int n) { + // exception within OpenMP parallel region must be caught. + // set a flag when exception occurs, then re-raise it. 
+ bool found_nan_inf = false; + bool found_overflow = false; + +#pragma omp parallel for + for (int i = 0; i < n; i++) { + try { fp6_ptr[i] = to_float6_e3m2_bits(bits_ptr[i]); } + catch (float6_e3m2_nan_inf const &) { found_nan_inf = true; } + catch (float6_e3m2_overflow const &) { found_overflow = true; } + } + + if (found_nan_inf) throw float6_e3m2_nan_inf(); + if (found_overflow) throw float6_e3m2_overflow(); +} + +// this is useful for debugging +at::Tensor to_float6_e3m2_unpacked_cpu(at::Tensor fp_tensor) { + TORCH_CHECK(fp_tensor.is_contiguous()); + TORCH_CHECK(fp_tensor.is_cpu()); + + at::TensorOptions options = at::TensorOptions().dtype(torch::kUInt8).device(fp_tensor.device()); + at::Tensor fp6_tensor = at::empty(fp_tensor.sizes(), options); + uint8_t *fp6_ptr = fp6_tensor.data_ptr(); + + int n = fp_tensor.numel(); + auto dtype = fp_tensor.dtype(); + + if (dtype == torch::kFloat32) { + const uint32_t *fp32_ptr = reinterpret_cast(fp_tensor.data_ptr()); + to_float6_e3m2_unpacked_cpu_impl(fp32_ptr, fp6_ptr, n); + + } else if (dtype == torch::kFloat16) { + const uint16_t *fp16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + to_float6_e3m2_unpacked_cpu_impl(fp16_ptr, fp6_ptr, n); + + } else if (dtype == torch::kBFloat16) { + const uint16_t *bf16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + to_float6_e3m2_unpacked_cpu_impl(bf16_ptr, fp6_ptr, n); + + } else { + throw std::invalid_argument("Only FP32, FP16, and BF16 inputs are accepted."); + } + + return fp6_tensor; +} + +template void to_float6_e3m2_packed_cpu_impl(const T *bits_ptr, uint8_t *fp6_ptr, int n) { + // exception within OpenMP parallel region must be caught. + // set a flag when exception occurs, then re-raise it. + bool found_nan_inf = false; + bool found_overflow = false; + +#pragma omp parallel for + for (int i = 0; i < n / 4; i++) { + try { + uint8_t val0 = to_float6_e3m2_bits(bits_ptr[i * 4]); + uint8_t val1 = to_float6_e3m2_bits(bits_ptr[i * 4 + 1]); + uint8_t val2 = to_float6_e3m2_bits(bits_ptr[i * 4 + 2]); + uint8_t val3 = to_float6_e3m2_bits(bits_ptr[i * 4 + 3]); + + fp6_ptr[i * 3] = (val0 << 2) | (val1 >> 4); // 0000 0011 + fp6_ptr[i * 3 + 1] = (val1 << 4) | (val2 >> 2); // 1111 2222 + fp6_ptr[i * 3 + 2] = (val2 << 6) | (val3); // 2233 3333 + } + catch (float6_e3m2_nan_inf const &) { found_nan_inf = true; } + catch (float6_e3m2_overflow const &) { found_overflow = true; } + } + + if (found_nan_inf) throw float6_e3m2_nan_inf(); + if (found_overflow) throw float6_e3m2_overflow(); +} + +at::Tensor to_float6_e3m2_packed_cpu(at::Tensor fp_tensor) { + TORCH_CHECK(fp_tensor.is_contiguous()); + TORCH_CHECK(fp_tensor.is_cpu()); + TORCH_CHECK(fp_tensor.ndimension() == 2); + + int M = fp_tensor.size(0); + int N = fp_tensor.size(1); + TORCH_CHECK(N % 4 == 0, "Last dimension must be a multiple of 4, receives ", N); + + at::TensorOptions options = at::TensorOptions().dtype(torch::kUInt8).device(fp_tensor.device()); + at::Tensor fp6_tensor = at::empty({M, N * 3 / 4}, options); + uint8_t *fp6_ptr = fp6_tensor.data_ptr(); + + int n = fp_tensor.numel(); + auto dtype = fp_tensor.dtype(); + + if (dtype == torch::kFloat32) { + const uint32_t *fp32_ptr = reinterpret_cast(fp_tensor.data_ptr()); + to_float6_e3m2_packed_cpu_impl(fp32_ptr, fp6_ptr, n); + + } else if (dtype == torch::kFloat16) { + const uint16_t *fp16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + to_float6_e3m2_packed_cpu_impl(fp16_ptr, fp6_ptr, n); + + } else if (dtype == torch::kBFloat16) { + const uint16_t *bf16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + 
to_float6_e3m2_packed_cpu_impl(bf16_ptr, fp6_ptr, n); + + } else { + throw std::invalid_argument("Only FP32, FP16, and BF16 inputs are accepted."); + } + + return fp6_tensor; +} + +template +void from_float6_e3m2_unpacked_cpu_impl(const uint8_t *fp6_ptr, T *fp_ptr, int n) { +#pragma omp parallel for + for (int i = 0; i < n; i++) + fp_ptr[i] = from_float6_e3m2_bits(fp6_ptr[i]); +} + +at::Tensor from_float6_e3m2_unpacked_cpu(at::Tensor fp6_tensor, c10::ScalarType dtype) { + TORCH_CHECK(fp6_tensor.dtype() == torch::kUInt8); + TORCH_CHECK(fp6_tensor.is_contiguous()); + TORCH_CHECK(fp6_tensor.is_cpu()); + + at::TensorOptions options = at::TensorOptions().dtype(dtype).device(fp6_tensor.device()); + at::Tensor fp_tensor = at::empty(fp6_tensor.sizes(), options); + + const uint8_t *fp6_ptr = fp6_tensor.data_ptr(); + int n = fp6_tensor.numel(); + + if (dtype == torch::kFloat32) { + uint32_t *fp32_ptr = reinterpret_cast(fp_tensor.data_ptr()); + from_float6_e3m2_unpacked_cpu_impl(fp6_ptr, fp32_ptr, n); + + } else if (dtype == torch::kFloat16) { + uint16_t *fp16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + from_float6_e3m2_unpacked_cpu_impl(fp6_ptr, fp16_ptr, n); + + } else if (dtype == torch::kBFloat16) { + uint16_t *bf16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + from_float6_e3m2_unpacked_cpu_impl(fp6_ptr, bf16_ptr, n); + + } else { + throw std::invalid_argument("Only FP32, FP16, and BF16 inputs are accepted."); + } + + return fp_tensor; +} + +template +void from_float6_e3m2_packed_cpu_impl(const uint8_t *fp6_ptr, T *fp_ptr, int n) { +#pragma omp parallel for + for (int i = 0; i < n / 3; i++) { + uint8_t bits0 = fp6_ptr[i * 3]; // 0000 0011 + uint8_t bits1 = fp6_ptr[i * 3 + 1]; // 1111 2222 + uint8_t bits2 = fp6_ptr[i * 3 + 2]; // 2233 3333 + + fp_ptr[i * 4] = from_float6_e3m2_bits(bits0 >> 2); + fp_ptr[i * 4 + 1] = from_float6_e3m2_bits(((bits0 & 0x3u) << 4) | (bits1 >> 4)); + fp_ptr[i * 4 + 2] = from_float6_e3m2_bits(((bits1 & 0xFu) << 2) | (bits2 >> 6)); + fp_ptr[i * 4 + 3] = from_float6_e3m2_bits(bits2 & 0x3Fu); + } +} + +at::Tensor from_float6_e3m2_packed_cpu(at::Tensor fp6_tensor, c10::ScalarType dtype) { + TORCH_CHECK(fp6_tensor.dtype() == torch::kUInt8); + TORCH_CHECK(fp6_tensor.is_contiguous()); + TORCH_CHECK(fp6_tensor.is_cpu()); + TORCH_CHECK(fp6_tensor.ndimension() == 2); + + int M = fp6_tensor.size(0); + int N = fp6_tensor.size(1); + TORCH_CHECK(N % 3 == 0, "Last dimension must be a multiple of 3, receives ", N); + + at::TensorOptions options = at::TensorOptions().dtype(dtype).device(fp6_tensor.device()); + at::Tensor fp_tensor = at::empty({M, N / 3 * 4}, options); + + const uint8_t *fp6_ptr = fp6_tensor.data_ptr(); + int n = fp6_tensor.numel(); + + if (dtype == torch::kFloat32) { + uint32_t *fp32_ptr = reinterpret_cast(fp_tensor.data_ptr()); + from_float6_e3m2_packed_cpu_impl(fp6_ptr, fp32_ptr, n); + + } else if (dtype == torch::kFloat16) { + uint16_t *fp16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + from_float6_e3m2_packed_cpu_impl(fp6_ptr, fp16_ptr, n); + + } else if (dtype == torch::kBFloat16) { + uint16_t *bf16_ptr = reinterpret_cast(fp_tensor.data_ptr()); + from_float6_e3m2_packed_cpu_impl(fp6_ptr, bf16_ptr, n); + + } else { + throw std::invalid_argument("Only FP32, FP16, and BF16 inputs are accepted."); + } + + return fp_tensor; +} + +TORCH_LIBRARY_IMPL(torchao, CPU, m) { + m.impl("torchao::to_float6_e3m2_unpacked_cpu", &to_float6_e3m2_unpacked_cpu); + m.impl("torchao::to_float6_e3m2_packed_cpu", &to_float6_e3m2_packed_cpu); + 
m.impl("torchao::from_float6_e3m2_unpacked_cpu", &from_float6_e3m2_unpacked_cpu); + m.impl("torchao::from_float6_e3m2_packed_cpu", &from_float6_e3m2_packed_cpu); +} + +} diff --git a/torchao/csrc/fp6_llm/fp6_llm.cpp b/torchao/csrc/fp6_llm/fp6_llm.cpp index 794c79df11..5239593bb6 100644 --- a/torchao/csrc/fp6_llm/fp6_llm.cpp +++ b/torchao/csrc/fp6_llm/fp6_llm.cpp @@ -6,6 +6,10 @@ TORCH_LIBRARY_FRAGMENT(torchao, m) { m.impl_abstract_pystub("torchao.ops"); m.def("fp16act_fp6weight_linear(Tensor _in_feats, Tensor _weights, Tensor _scales, int splitK) -> Tensor"); m.def("prepack_fp6_weight(Tensor fp6_tensor) -> Tensor"); - m.def("fp16_to_fp6(Tensor fp16_tensor) -> Tensor"); - m.def("fp6_weight_dequant(Tensor fp6_tensor, Tensor fp16_scale) -> Tensor"); + m.def("fp16_to_fp6_original(Tensor fp16_tensor) -> Tensor"); + + m.def("to_float6_e3m2_unpacked_cpu(Tensor tensor) -> Tensor"); + m.def("to_float6_e3m2_packed_cpu(Tensor tensor) -> Tensor"); + m.def("from_float6_e3m2_unpacked_cpu(Tensor tensor, ScalarType dtype) -> Tensor"); + m.def("from_float6_e3m2_packed_cpu(Tensor tensor, ScalarType dtype) -> Tensor"); } diff --git a/torchao/dtypes/__init__.py b/torchao/dtypes/__init__.py index dccd22f3d4..d12a6da566 100644 --- a/torchao/dtypes/__init__.py +++ b/torchao/dtypes/__init__.py @@ -1,6 +1,7 @@ from .nf4tensor import NF4Tensor, to_nf4 from .uint4 import UInt4Tensor from .aqt import AffineQuantizedTensor, to_aq +from .float6_e3m2 import to_float6_e3m2, from_float6_e3m2 __all__ = [ "NF4Tensor", @@ -8,4 +9,6 @@ "UInt4Tensor" "AffineQuantizedTensor", "to_aq", + "to_float6_e3m2", + "from_float6_e3m2", ] diff --git a/torchao/dtypes/float6_e3m2.py b/torchao/dtypes/float6_e3m2.py new file mode 100644 index 0000000000..0c27838d06 --- /dev/null +++ b/torchao/dtypes/float6_e3m2.py @@ -0,0 +1,178 @@ +import torch +from torch import Tensor +from torch.utils._triton import has_triton +from torchao.ops import to_float6_e3m2_packed_cpu, to_float6_e3m2_unpacked_cpu, from_float6_e3m2_packed_cpu, from_float6_e3m2_unpacked_cpu + + +# some useful constants +FLOAT6_E3M2_MAX = 28.0 +FLOAT6_E3M2_SMALLEST_SUBNORMAL = 0.0625 + + +if has_triton(): + import triton + from triton import language as tl + + # see _to_float6_e3m2_pt() for explanation + @triton.jit + def _triton_float32_to_float6_e3m2(x: tl.tensor): + x = x.to(tl.float32) + x = x * 2.0 ** (-127 + 3) + bits = x.to(tl.int32, bitcast=True) + + sign = ((bits >> 31) & 0x1) << 5 + exp_and_man = (bits >> 21) & 0x1F + result = sign | exp_and_man + + remainder = bits & 0x1F_FFFF + do_round_up = (remainder > 0x10_0000) | ((remainder == 0x10_0000) & ((result & 1) == 1)) + result = tl.where(do_round_up, result + 1, result) + return result.to(tl.uint8) + + @triton.jit + def _to_float6_e3m2_triton_kernel(in_ptr, out_ptr, n, BLOCK_SIZE: tl.constexpr): + offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n + + # strided memory read. there will be uncoalesced memory access + val0 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4, mask)) + val1 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4 + 1, mask)) + val2 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4 + 2, mask)) + val3 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4 + 3, mask)) + + # bit packing + bits0 = (val0 << 2) | (val1 >> 4) # 0000 0011 + bits1 = (val1 << 4) | (val2 >> 2) # 1111 2222 + bits2 = (val2 << 6) | (val3); # 2233 3333 + + # strided memory write. 
diff --git a/torchao/dtypes/float6_e3m2.py b/torchao/dtypes/float6_e3m2.py
new file mode 100644
index 0000000000..0c27838d06
--- /dev/null
+++ b/torchao/dtypes/float6_e3m2.py
@@ -0,0 +1,178 @@
+import torch
+from torch import Tensor
+from torch.utils._triton import has_triton
+from torchao.ops import to_float6_e3m2_packed_cpu, to_float6_e3m2_unpacked_cpu, from_float6_e3m2_packed_cpu, from_float6_e3m2_unpacked_cpu
+
+
+# some useful constants
+FLOAT6_E3M2_MAX = 28.0
+FLOAT6_E3M2_SMALLEST_SUBNORMAL = 0.0625
+
+
+if has_triton():
+    import triton
+    from triton import language as tl
+
+    # see _to_float6_e3m2_pt() for explanation
+    @triton.jit
+    def _triton_float32_to_float6_e3m2(x: tl.tensor):
+        x = x.to(tl.float32)
+        x = x * 2.0 ** (-127 + 3)
+        bits = x.to(tl.int32, bitcast=True)
+
+        sign = ((bits >> 31) & 0x1) << 5
+        exp_and_man = (bits >> 21) & 0x1F
+        result = sign | exp_and_man
+
+        remainder = bits & 0x1F_FFFF
+        do_round_up = (remainder > 0x10_0000) | ((remainder == 0x10_0000) & ((result & 1) == 1))
+        result = tl.where(do_round_up, result + 1, result)
+        return result.to(tl.uint8)
+
+    @triton.jit
+    def _to_float6_e3m2_triton_kernel(in_ptr, out_ptr, n, BLOCK_SIZE: tl.constexpr):
+        offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+        mask = offsets < n // 4  # each offset handles a group of 4 input values
+
+        # strided memory read. there will be uncoalesced memory access
+        val0 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4, mask))
+        val1 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4 + 1, mask))
+        val2 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4 + 2, mask))
+        val3 = _triton_float32_to_float6_e3m2(tl.load(in_ptr + offsets * 4 + 3, mask))
+
+        # bit packing
+        bits0 = (val0 << 2) | (val1 >> 4)  # 0000 0011
+        bits1 = (val1 << 4) | (val2 >> 2)  # 1111 2222
+        bits2 = (val2 << 6) | (val3)       # 2233 3333
+
+        # strided memory write. there will be uncoalesced memory access
+        tl.store(out_ptr + offsets * 3, bits0, mask)
+        tl.store(out_ptr + offsets * 3 + 1, bits1, mask)
+        tl.store(out_ptr + offsets * 3 + 2, bits2, mask)
+
+    def _to_float6_e3m2_triton(tensor: Tensor) -> Tensor:
+        out_shape = tensor.shape[:-1] + (tensor.shape[-1] // 4 * 3,)
+        output = torch.empty(out_shape, device=tensor.device, dtype=torch.uint8)
+
+        n = tensor.numel()
+        grid_size = lambda meta: (triton.cdiv(n, meta["BLOCK_SIZE"] * 4),)
+        _to_float6_e3m2_triton_kernel[grid_size](tensor, output, n, BLOCK_SIZE=256)
+
+        return output
+
+else:
+    _to_float6_e3m2_triton = None
+
+
+# NOTE: This implementation requires FP32 denormal numbers to be handled correctly.
+# On CPU, denormal numbers might be flushed to zero for performance gain (FTZ and DAZ flags).
+def _to_float6_e3m2_pt(tensor: Tensor, no_bit_packing: bool = False) -> Tensor:
+    tensor = tensor.float()
+
+    # correct exponent bias. this also handles subnormal numbers correctly
+    tensor = tensor * 2.0 ** (-127 + 3)
+    bits = tensor.view(torch.int32)
+
+    sign = ((bits >> 31) & 0x1) << 5
+    exp_and_man = (bits >> 21) & 0x1F
+    result = sign | exp_and_man
+
+    # round to nearest even
+    remainder = bits & 0x1F_FFFF  # truncated mantissa bits
+    do_round_up = (remainder > 0x10_0000) | ((remainder == 0x10_0000) & ((result & 1) == 1))
+    result = torch.where(do_round_up, result + 1, result)
+    result = result.to(torch.uint8)
+
+    if no_bit_packing:
+        return result
+
+    # bit packing
+    val0, val1, val2, val3 = result.unflatten(-1, (-1, 4)).unbind(-1)
+    bits0 = (val0 << 2) | (val1 >> 4)  # 0000 0011
+    bits1 = (val1 << 4) | (val2 >> 2)  # 1111 2222
+    bits2 = (val2 << 6) | (val3)       # 2233 3333
+    return torch.stack([bits0, bits1, bits2], dim=-1).flatten(-2)
+
+
+def to_float6_e3m2(tensor: Tensor, no_bit_packing: bool = False) -> Tensor:
+    """Convert input tensor to FP6. This particular FP6 format has 3 exponent bits and 2 mantissa
+    bits. By default, bit packing is performed: every 4 FP6 values are packed as 3 uint8 values
+    (4 x 6 bits = 3 x 8 bits).
+
+    Args:
+        tensor: Input tensor. The last dimension must be divisible by 4 (unless ``no_bit_packing=True``).
+        no_bit_packing: Whether to skip bit packing. Setting this to ``True`` can be useful for
+            observing the bit patterns and debugging.
+
+    Returns:
+        :class:`torch.Tensor`: FP6 tensor, stored as uint8 data. If ``no_bit_packing=False``, the last
+        dimension of the output tensor is 3/4 of that of the input tensor.
+
+    Note:
+        This FP6 format cannot represent +/-inf and NaN. Make sure the input tensor has no +/-inf
+        or NaN values, and no values with magnitude >= 30: the largest FP6 value is 28, values in
+        [28, 30) are rounded down to 28, and values >= 30 overflow.
+
+    See also :func:`from_float6_e3m2`
+    """
+    if not no_bit_packing:
+        assert tensor.shape[-1] % 4 == 0, "Last dim must be divisible by 4"
+
+    if tensor.is_cpu:
+        if no_bit_packing:
+            return to_float6_e3m2_unpacked_cpu(tensor)
+
+        *leading_dims, last_dim = tensor.shape
+        return to_float6_e3m2_packed_cpu(tensor.view(-1, last_dim)).view(*leading_dims, -1)
+
+    # torch.compile() cannot generate a fused bit-packing triton kernel,
+    # thus we write a custom triton kernel for this specific case.
+    if tensor.is_cuda and not no_bit_packing and _to_float6_e3m2_triton is not None:
+        return _to_float6_e3m2_triton(tensor)
+
+    else:
+        return _to_float6_e3m2_pt(tensor, no_bit_packing=no_bit_packing)
+
+
+# NOTE: This implementation requires FP32 denormal numbers to be handled correctly.
+# On CPU, denormal numbers might be flushed to zero for performance gain (FTZ and DAZ flags).
+def _pt_float6_e3m2_to_float32(tensor: Tensor) -> Tensor:
+    bits = tensor.to(torch.int32)  # bit extension
+    sign = bits >> 5 << 31
+    exp_and_man = (bits & 0x1F) << 21
+    results = sign | exp_and_man
+
+    results = results.view(torch.float32)
+    return results * 2.0 ** (127 - 3)  # exponent bias correction
+
+
+def from_float6_e3m2(tensor: Tensor, no_bit_packing: bool = False, dtype: torch.dtype = torch.float32) -> Tensor:
+    """Convert an FP6 tensor (created by :func:`to_float6_e3m2`) back to a floating-point tensor.
+
+    Args:
+        tensor: FP6 tensor, stored as uint8 data. If ``no_bit_packing=False``, the last dimension
+            must be divisible by 3.
+        no_bit_packing: whether the input was created without bit packing.
+        dtype: the desired dtype of the output tensor.
+
+    Returns:
+        :class:`torch.Tensor`: the dequantized tensor. If ``no_bit_packing=False``, the last dimension
+        of the output tensor is 4/3 of that of the input tensor.
+    """
+    assert tensor.dtype == torch.uint8
+    if no_bit_packing:
+        if tensor.is_cpu:
+            return from_float6_e3m2_unpacked_cpu(tensor, dtype)
+
+        return _pt_float6_e3m2_to_float32(tensor).to(dtype)
+
+    assert tensor.shape[-1] % 3 == 0, "Last dim must be divisible by 3"
+    if tensor.is_cpu:
+        return from_float6_e3m2_packed_cpu(tensor, dtype)
+
+    bits0, bits1, bits2 = tensor.unflatten(-1, (-1, 3)).unbind(-1)
+    val0 = _pt_float6_e3m2_to_float32(bits0 >> 2).to(dtype)
+    val1 = _pt_float6_e3m2_to_float32(((bits0 & 0x3) << 4) | (bits1 >> 4)).to(dtype)
+    val2 = _pt_float6_e3m2_to_float32(((bits1 & 0xF) << 2) | (bits2 >> 6)).to(dtype)
+    val3 = _pt_float6_e3m2_to_float32(bits2 & 0x3F).to(dtype)
+    return torch.stack([val0, val1, val2, val3], dim=-1).flatten(-2)
diff --git a/torchao/ops.py b/torchao/ops.py
index 05a1668399..7fce2de22f 100644
--- a/torchao/ops.py
+++ b/torchao/ops.py
@@ -2,6 +2,7 @@
 from torch import Tensor
 from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4
 
+
 def register_custom_op(name):
     def decorator(func):
         if TORCH_VERSION_AFTER_2_4:
@@ -11,7 +12,6 @@ def decorator(func):
     return decorator
 
 
-
 def prepack_fp6_weight(fp6_weight: Tensor) -> Tensor:
     """
     Pack FP6 tensor in a layout for use with FP6-LLM. See https://arxiv.org/abs/2401.14112 for more details.
@@ -32,14 +32,20 @@ def _(fp6_weight):
     return torch.empty_like(fp6_weight)
 
 
-def fp16_to_fp6(fp16_tensor: Tensor) -> Tensor:
+def fp16_to_fp6_original(fp16_tensor: Tensor) -> Tensor:
     """
-    Pack FP16 tensor (containing only FP6 values) into FP6 tensor.
+    Pack FP16 tensor to FP6 tensor. qtorch is required to use this function.
""" - return torch.ops.torchao.fp16_to_fp6.default(fp16_tensor) + try: + from qtorch.quant import float_quantize + except ImportError as e: + raise RuntimeError("Please install qtorch to use this function") from e + + fp16_tensor = float_quantize(fp16_tensor.float(), 3, 2, rounding="nearest").half() + return torch.ops.torchao.fp16_to_fp6_original.default(fp16_tensor) -@register_custom_op("torchao::fp16_to_fp6") +@register_custom_op("torchao::fp16_to_fp6_original") def _(fp16_tensor): torch._check(fp16_tensor.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp16_tensor.dim()}D") torch._check(fp16_tensor.dtype is torch.float16, lambda: f"weight must be FP16, got {fp16_tensor.dtype}") @@ -81,18 +87,17 @@ def _(_in_feats, _weights, _scales, splitK = 1): return _in_feats.new_empty((BS, OC)) -def fp6_weight_dequant(fp6_tensor: Tensor, fp16_scale: Tensor) -> Tensor: - return torch.ops.torchao.fp6_weight_dequant.default(fp6_tensor, fp16_scale) +def to_float6_e3m2_unpacked_cpu(tensor: Tensor) -> Tensor: + return torch.ops.torchao.to_float6_e3m2_unpacked_cpu.default(tensor) + + +def to_float6_e3m2_packed_cpu(tensor: Tensor) -> Tensor: + return torch.ops.torchao.to_float6_e3m2_packed_cpu.default(tensor) -@register_custom_op("torchao::fp6_weight_dequant") -def _(fp6_tensor, fp16_scale): - torch._check(fp6_tensor.dim() == 2, lambda: f"weight should be a 2d tensor, got {fp6_tensor.dim()}D") - torch._check(fp6_tensor.dtype is torch.int32, lambda: f"weight must be INT32, got {fp6_tensor.dtype}") - torch._check(fp16_scale.dim() == 1, lambda: f"scale should be a 2d tensor, got {fp16_scale.dim()}D") - torch._check(fp16_scale.dtype is torch.float16, lambda: f"scale must be FP16, got {fp16_scale.dtype}") +def from_float6_e3m2_unpacked_cpu(tensor: Tensor, dtype: torch.dtype) -> Tensor: + return torch.ops.torchao.from_float6_e3m2_unpacked_cpu.default(tensor, dtype) - OC, _IC = fp6_tensor.shape - torch._check(OC == fp16_scale.shape[0], lambda: "Dimensions mismatched") - return fp16_scale.new_empty((OC, _IC * 16 // 3)) +def from_float6_e3m2_packed_cpu(tensor: Tensor, dtype: torch.dtype) -> Tensor: + return torch.ops.torchao.from_float6_e3m2_packed_cpu.default(tensor, dtype) From 7511b1d365e2e314d1193d7b8df049ee9452e63c Mon Sep 17 00:00:00 2001 From: Thien Tran Date: Mon, 27 May 2024 01:36:53 +0800 Subject: [PATCH 58/61] Improve FP6-LLM 2+4bit weight splitting + user API (#279) * add annotation * add weight splitting logic * update from fp6_quant * merge to_tc_float6_e3m2 * add more optimized version * add some notes * add from_tc_float6_e3m2 * add some docs * make fp6_llm.py * add test for linear * fix fp6 llm * switch to v2 since it's faster * fix type hint for old python * simplify further * fix typing for old python * add test * eliminate indexing.faster on CUDA * skip fp6_llm on cpu * improve error message * add support for extra batch dims * cast output to original dtype * fix precision error due to dtype --- test/dtypes/test_float6_e3m2.py | 4 +- test/quantization/test_fp6_llm.py | 99 +++++++++++++++ torchao/csrc/cuda/fp6_llm/fp6_linear.cu | 4 +- torchao/quantization/fp6_llm.py | 160 ++++++++++++++++++++++++ 4 files changed, 263 insertions(+), 4 deletions(-) create mode 100644 test/quantization/test_fp6_llm.py create mode 100644 torchao/quantization/fp6_llm.py diff --git a/test/dtypes/test_float6_e3m2.py b/test/dtypes/test_float6_e3m2.py index b821504731..c3365cffeb 100644 --- a/test/dtypes/test_float6_e3m2.py +++ b/test/dtypes/test_float6_e3m2.py @@ -12,7 +12,7 @@ _DEVICES = ["cpu"] + (["cuda"] if 
torch.cuda.is_available() else []) -class TestFp6(TestCase): +class TestFloat6E3M2(TestCase): @parametrize("device", _DEVICES) @parametrize("dtype", _DTYPES) @@ -120,7 +120,7 @@ def test_from_float6_e3m2_compile(self, device, no_bit_packing): torch.testing.assert_close(actual, expected) -instantiate_parametrized_tests(TestFp6) +instantiate_parametrized_tests(TestFloat6E3M2) if __name__ == "__main__": diff --git a/test/quantization/test_fp6_llm.py b/test/quantization/test_fp6_llm.py new file mode 100644 index 0000000000..635f78765c --- /dev/null +++ b/test/quantization/test_fp6_llm.py @@ -0,0 +1,99 @@ +import pytest +import torch +from torch import nn +from torch.testing._internal.common_utils import ( + TestCase, + instantiate_parametrized_tests, + parametrize, + run_tests, +) +from torchao.dtypes.float6_e3m2 import to_float6_e3m2, from_float6_e3m2 +from torchao.quantization.fp6_llm import to_tc_float6_e3m2, from_tc_float6_e3m2, Fp6LlmLinear, convert_fp6_llm +from torchao.ops import prepack_fp6_weight + + +_DEVICES = ["cpu"] + (["cuda"] if torch.cuda.is_available() else []) + + +class TestFp6LlmLinear(TestCase): + @parametrize("device", _DEVICES) + def test_to_tc_float6_e3m2_correctness(self, device): + x = torch.randn(256, 64, device=device) + + expected = prepack_fp6_weight(to_float6_e3m2(x.cpu()).view(torch.int32)).view(torch.uint8) + actual = to_tc_float6_e3m2(x) + torch.testing.assert_close(actual.view(-1).cpu(), expected.view(-1)) + + @parametrize("device", _DEVICES) + def test_to_tc_float6_e3m2_compile(self, device): + x = torch.randn(256, 64, device=device) + + expected = to_tc_float6_e3m2(x) + actual = torch.compile(to_tc_float6_e3m2)(x) + torch.testing.assert_close(actual, expected) + + @parametrize("device", _DEVICES) + def test_from_tc_float6_e3m2_correctness(self, device): + x = torch.randn(256, 64, device=device) + x = from_float6_e3m2(to_float6_e3m2(x)) # quantize and dequantize so that the values are exactly representable in FP6 + + actual = from_tc_float6_e3m2(to_tc_float6_e3m2(x), *x.shape) + torch.testing.assert_close(actual, x) + + @parametrize("device", _DEVICES) + def test_from_tc_float6_e3m2_compile(self, device): + M, N = 256, 64 + x = torch.randint(256, size=(M * N * 3 // 4,), dtype=torch.uint8, device=device) + + expected = from_tc_float6_e3m2(x, M, N) + actual = torch.compile(from_tc_float6_e3m2)(x, M, N) + torch.testing.assert_close(actual, expected) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + @parametrize("leading_dims", [(4,), (2, 4)]) + @parametrize("bias", [False, True]) + def test_fp6_llm_linear_forward(self, bias, leading_dims): + OC, IC = 256, 64 + device = "cuda" + + linear = torch.nn.Linear(IC, OC, bias=bias, device=device) + fp6_linear = Fp6LlmLinear.from_float(linear) + assert (fp6_linear.bias is not None) == bias + + x = torch.randn(*leading_dims, IC, device=device, dtype=torch.half) + fp6_linear(x) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + @parametrize("bias", [False, True]) + def test_fp6_llm_linear_compile(self, bias): + N, OC, IC = 4, 256, 64 + device = "cuda" + + linear = torch.nn.Linear(IC, OC, bias=bias, device=device) + fp6_linear = Fp6LlmLinear.from_float(linear) + + x = torch.randn(N, IC, device=device, dtype=torch.half) + expected = fp6_linear(x) + actual = torch.compile(fp6_linear)(x) + torch.testing.assert_close(actual, expected) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_convert_fp6_llm(self): + 
device = "cuda" + model = nn.Sequential(nn.Linear(64, 256, bias=False), nn.Linear(256, 256)).to(device) + convert_fp6_llm(model) + + assert isinstance(model[0], Fp6LlmLinear) + assert model[0].bias is None + assert isinstance(model[1], Fp6LlmLinear) + assert model[1].bias is not None + + x = torch.randn(4, 64, device=device) + model(x) + + +instantiate_parametrized_tests(TestFp6LlmLinear) + + +if __name__ == "__main__": + run_tests() diff --git a/torchao/csrc/cuda/fp6_llm/fp6_linear.cu b/torchao/csrc/cuda/fp6_llm/fp6_linear.cu index 51413a0874..30b0978a1a 100644 --- a/torchao/csrc/cuda/fp6_llm/fp6_linear.cu +++ b/torchao/csrc/cuda/fp6_llm/fp6_linear.cu @@ -144,8 +144,8 @@ torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats, int num_in_feats = _in_feats.size(0); int num_in_channels = _in_feats.size(1); int num_out_channels = _weights.size(0); - assert( num_in_channels%64 == 0 ); - assert( (num_in_channels/16*3) == _weights.size(1) ); // Making sure the K dimension is matched. + TORCH_CHECK(num_in_channels%64 == 0, "Expected in_features to be a multiple of 64, but received ", num_in_channels); + TORCH_CHECK((num_in_channels/16*3) == _weights.size(1)); // Making sure the K dimension is matched. // int M = num_out_channels; int K = num_in_channels; diff --git a/torchao/quantization/fp6_llm.py b/torchao/quantization/fp6_llm.py new file mode 100644 index 0000000000..9f559d4164 --- /dev/null +++ b/torchao/quantization/fp6_llm.py @@ -0,0 +1,160 @@ +from typing import Optional + +import torch +from torch import nn, Tensor +from torchao.dtypes.float6_e3m2 import FLOAT6_E3M2_MAX, to_float6_e3m2, from_float6_e3m2 +from torchao.ops import fp16act_fp6weight_linear + + +def _pack_2bit(x: Tensor) -> Tensor: + return (x[..., ::4] << 6) | (x[..., 1::4] << 4) | (x[..., 2::4] << 2) | x[..., 3::4] + + +def _unpack_2bit(x: Tensor) -> Tensor: + return torch.stack([x >> 6, (x >> 4) & 0b11, (x >> 2) & 0b11, x & 0b11], dim=-1).flatten(-2) + + +def _pack_4bit(x: Tensor) -> Tensor: + return (x[..., ::2] << 4) | x[..., 1::2] + + +def _unpack_4bit(x: Tensor) -> Tensor: + return torch.stack([x >> 4, x & 0b1111], dim=-1).flatten(-2) + + +# this is a literal adaptation of FP6-LLM ahead-of-time bit-level pre-packing +# https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_prepacking.h +def _to_tc_float6_e3m2_original(tensor: Tensor) -> Tensor: + assert tensor.ndim == 2 + M, N = tensor.shape + assert (M % 64 == 0) and (N % 64 == 0) + + tensor_fp6 = to_float6_e3m2(tensor, no_bit_packing=True) + + # Pass 1 from original code + tensor_fp6 = tensor_fp6.view(M // 64, 4, 2, 8, N // 16, 2, 8) + tensor_fp6 = tensor_fp6.permute(0, 4, 1, 5, 2, 3, 6) + tensor_fp6 = tensor_fp6.reshape(-1, 32, 2) + tensor_fp6 = tensor_fp6.permute(1, 0, 2) + tensor_fp6 = tensor_fp6.flatten() + + tensor_2bit = _pack_2bit((tensor_fp6 >> 4) & 0b11) + tensor_4bit = _pack_4bit(tensor_fp6 & 0b1111) + + # Pass 2 from original code + tensor_2bit = tensor_2bit.view(32, -1, 4).permute(1, 0, 2).flip(2) + tensor_4bit = tensor_4bit.view(32, -1, 4).permute(1, 0, 2).flip(2) + + # Pass 3 from original code + # BitInterleaving_2bit + # the 1st and 3rd permutations are needed because the author unpacks/packs the values from/to uint32 + # while we still unpack/pack the values from/to uint8 + tensor_2bit = _unpack_2bit(tensor_2bit).view(-1, 16) + tensor_2bit = tensor_2bit[:, [12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3]] + tensor_2bit = tensor_2bit[:, [1, 5, 9, 13, 3, 7, 11, 15, 0, 4, 8, 12, 2, 6, 
10, 14]] + tensor_2bit = tensor_2bit[:, [12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3]] + tensor_2bit = _pack_2bit(tensor_2bit).view(-1) + + # BitInterleaving_4bit + # the 1st and 3rd permutations are needed because the author unpacks/packs the values from/to uint32 + # while we still unpack/pack the values from/to uint8 + tensor_4bit = _unpack_4bit(tensor_4bit).view(-1, 8) + tensor_4bit = tensor_4bit[:, [4, 5, 6, 7, 0, 1, 2, 3]] + tensor_4bit = tensor_4bit[:, [1, 5, 3, 7, 0, 4, 2, 6]] + tensor_4bit = tensor_4bit[:, [4, 5, 6, 7, 0, 1, 2, 3]] + tensor_4bit = _pack_4bit(tensor_4bit).view(-1) + + return torch.cat([tensor_2bit, tensor_4bit], dim=0) + + +# more optimized version of _to_tc_float6_e3m2_original() by merging ops +# https://github.com/usyd-fsalab/fp6_llm/blob/ce76774bcfc26b325c1b558abcf1935026d9abbc/fp6_llm/csrc/utils/weight_prepacking.h +def to_tc_float6_e3m2(tensor: Tensor) -> Tensor: + assert tensor.ndim == 2 + M, N = tensor.shape + assert (M % 64 == 0) and (N % 64 == 0) + + tensor_fp6 = to_float6_e3m2(tensor, no_bit_packing=True) + tensor_fp6 = tensor_fp6.view(M // 64, 2, 2, 2, 8, N // 16, 2, 8) + tensor_fp6 = tensor_fp6.flip(3) + + tensor_2bit = (tensor_fp6 >> 4) & 0b11 + tensor_2bit = tensor_2bit.permute(0, 5, 1, 4, 7, 3, 2, 6) + tensor_2bit = _pack_2bit(tensor_2bit.flatten()) + + tensor_4bit = tensor_fp6 & 0b1111 + tensor_4bit = tensor_4bit.permute(0, 5, 1, 2, 4, 7, 3, 6) + tensor_4bit = _pack_4bit(tensor_4bit.flatten()) + + return torch.cat([tensor_2bit, tensor_4bit], dim=0) + + +def from_tc_float6_e3m2(tensor: Tensor, M: int, N: int, dtype: torch.dtype = torch.float32) -> Tensor: + assert tensor.ndim == 1 + assert (M % 64 == 0) and (N % 64 == 0) + size_2bit = M * N // 4 + size_4bit = M * N // 2 + assert tensor.numel() == size_2bit + size_4bit + + tensor_2bit, tensor_4bit = tensor.split([size_2bit, size_4bit]) + + tensor_2bit = _unpack_2bit(tensor_2bit) + tensor_2bit = tensor_2bit.view(M // 64, N // 16, 2, 8, 8, 2, 2, 2) + tensor_2bit = tensor_2bit.permute(0, 2, 6, 5, 3, 1, 7, 4) + + tensor_4bit = _unpack_4bit(tensor_4bit) + tensor_4bit = tensor_4bit.view(M // 64, N // 16, 2, 2, 8, 8, 2, 2) + tensor_4bit = tensor_4bit.permute(0, 2, 3, 6, 4, 1, 7, 5) + + tensor_fp6 = (tensor_2bit << 4) | tensor_4bit + tensor_fp6 = tensor_fp6.flip(3).reshape(M, N) + return from_float6_e3m2(tensor_fp6, no_bit_packing=True, dtype=dtype) + + +class Fp6LlmLinear(nn.Module): + """FP6-LLM Linear layer as described in https://arxiv.org/pdf/2401.14112. 
+ """ + + def __init__(self, weight: Tensor, scales: Tensor, bias: Optional[Tensor] = None) -> None: + super().__init__() + self.register_buffer("weight", weight) + self.register_buffer("scales", scales) + self.register_buffer("bias", bias) + self.out_features = weight.shape[0] + self.in_features = weight.shape[1] * 16 // 3 + + def forward(self, x: Tensor) -> Tensor: + # TODO: splitK map + out = fp16act_fp6weight_linear(x.view(-1, self.in_features).half(), self.weight, self.scales, splitK=1) + if self.bias is not None: + out = out + self.bias + return out.view(*x.shape[:-1], self.out_features).to(x.dtype) + + @classmethod + def from_float(cls, linear: nn.Linear): + assert (linear.in_features % 64 == 0) and (linear.out_features % 256 == 0) + + fp32_weight = linear.weight.detach().float() + scales = fp32_weight.abs().amax(1) / FLOAT6_E3M2_MAX + scales[scales == 0.0] = 1.0 # avoid 0 scale + + tc_fp6_weight = to_tc_float6_e3m2(fp32_weight / scales.view(-1, 1)) + tc_fp6_weight = tc_fp6_weight.view(linear.out_features, -1).view(torch.int32) + + bias = linear.bias.detach().half() if linear.bias is not None else None + return cls(tc_fp6_weight, scales.half(), bias) + + def extra_repr(self) -> str: + return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}' + + +def convert_fp6_llm(model: nn.Module, skip_fqn_list: Optional[list[str]] = None, cur_fqn: str = "") -> None: + for name, child in model.named_children(): + new_fqn = name if cur_fqn == "" else f"{cur_fqn}.{name}" + + if ((skip_fqn_list is None) or (new_fqn not in skip_fqn_list)) and (isinstance(child, nn.Linear)): + if (child.in_features % 64 == 0) and (child.out_features % 256 == 0): + new_child = Fp6LlmLinear.from_float(child) + setattr(model, name, new_child) + else: + convert_fp6_llm(child, skip_fqn_list, new_fqn) From 5b04ff03c9d2fc8213a74960a7b842c61b05dae1 Mon Sep 17 00:00:00 2001 From: andrewor14 Date: Tue, 28 May 2024 12:06:42 -0400 Subject: [PATCH 59/61] Extract eval code from GPTQ for more general usage (#275) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: This commit extracts all the eval code from GPTQ.py. This is the first step towards having a general eval framework in torchao. The eventual goal is to use lm_eval to produce reproducible benchmarks for the quantization APIs in torchao that we can showcase on the main README. This will have the added benefit of allowing us to add (possibly nightly) regression test suites for important models. Test Plan: ``` 2024-05-24:14:50:32,647 INFO [task.py:395] Building contexts for wikitext on rank 0... 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 1132.98it/s] 2024-05-24:14:50:32,648 INFO [evaluator.py:362] Running loglikelihood_rolling requests 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:51<00:00, 51.39s/it] wikitext: {'word_perplexity,none': 7.877762491958485, 'word_perplexity_stderr,none': 'N/A', 'byte_perplexity,none': 1.488984329919892, 'byte_perplexity_stderr,none': 'N/A', 'bits_per_byte,none': 0.5743285710685551, 'bits_per_byte_stderr,none': 'N/A', 'alias': 'wikitext'} . 
---------------------------------------------------------------------- Ran 1 test in 858.105s OK ``` python test/quantization/test_quant_api.py -k test_8da4w_gptq_quantizer --- test/quantization/test_quant_api.py | 17 +- torchao/_eval.py | 228 +++++++++++++++++++++++ torchao/quantization/GPTQ.py | 275 ++-------------------------- torchao/quantization/utils.py | 27 +++ 4 files changed, 284 insertions(+), 263 deletions(-) create mode 100644 torchao/_eval.py diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index 24882b8418..70c2562bb3 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -186,9 +186,14 @@ def test_8da4w_quantizer(self): assert isinstance(m.linear2, Int8DynActInt4WeightLinear) m(*example_inputs) + # TODO: save model weights as artifacts and re-enable in CI + # For now, to run this test, you will need to download the weights from HF + # and run this script to convert them: + # https://github.com/pytorch-labs/gpt-fast/blob/6253c6bb054e658d67566150f87329b87815ae63/scripts/convert_hf_checkpoint.py @unittest.skip("skipping until we get checkpoints for gpt-fast") def test_8da4w_gptq_quantizer(self): - from torchao.quantization.GPTQ import Int8DynActInt4WeightGPTQQuantizer, InputRecorder, TransformerEvalWrapper + from torchao.quantization.GPTQ import Int8DynActInt4WeightGPTQQuantizer + from torchao._eval import InputRecorder, TransformerEvalWrapper # should be similar to TorchCompileDynamicQuantizer precision = torch.bfloat16 device = "cpu" @@ -250,7 +255,7 @@ def test_8da4w_gptq_quantizer(self): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "skipping when torch verion is 2.4 or lower") def test_8da4w_quantizer_eval(self): from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer - from torchao.quantization.GPTQ import TransformerEvalWrapper + from torchao._eval import TransformerEvalWrapper precision = torch.bfloat16 device = "cpu" @@ -284,7 +289,8 @@ def test_8da4w_quantizer_eval(self): @unittest.skip("skipping until we get checkpoints for gpt-fast") def test_gptq_quantizer_int4wo(self): - from torchao.quantization.GPTQ import Int4WeightOnlyGPTQQuantizer, InputRecorder, TransformerEvalWrapper + from torchao.quantization.GPTQ import Int4WeightOnlyGPTQQuantizer + from torchao._eval import InputRecorder, TransformerEvalWrapper precision = torch.bfloat16 device = "cuda" checkpoint_path = Path("../gpt-fast/checkpoints/meta-llama/Llama-2-7b-chat-hf/model.pth") @@ -343,7 +349,8 @@ def test_gptq_quantizer_int4wo(self): @unittest.skip("skipping until we get checkpoints for gpt-fast") def test_quantizer_int4wo(self): - from torchao.quantization.GPTQ import Int4WeightOnlyQuantizer, TransformerEvalWrapper + from torchao.quantization.GPTQ import Int4WeightOnlyQuantizer + from torchao._eval import TransformerEvalWrapper precision = torch.bfloat16 device = "cuda" checkpoint_path = Path("../gpt-fast/checkpoints/meta-llama/Llama-2-7b-chat-hf/model.pth") @@ -378,7 +385,7 @@ def test_quantizer_int4wo(self): @unittest.skip("skipping until we get checkpoints for gpt-fast") def test_eval_wrapper(self): - from torchao.quantization.GPTQ import TransformerEvalWrapper + from torchao._eval import TransformerEvalWrapper precision = torch.bfloat16 device = "cuda" checkpoint_path = Path("../gpt-fast/checkpoints/meta-llama/Llama-2-7b-chat-hf/model.pth") diff --git a/torchao/_eval.py b/torchao/_eval.py new file mode 100644 index 0000000000..c7e6ce8381 --- /dev/null +++ b/torchao/_eval.py @@ -0,0 +1,228 @@ +# Copyright (c) 
Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from .utils import _lm_eval_available, _MultiInput + +if _lm_eval_available: + try: # lm_eval version 0.4 + from lm_eval.evaluator import evaluate # pyre-ignore[21] + from lm_eval.models.huggingface import HFLM as eval_wrapper # pyre-ignore[21] + from lm_eval.tasks import get_task_dict # pyre-ignore[21] + except: # lm_eval version 0.3 + from lm_eval import base, evaluator, tasks + + eval_wrapper = base.BaseLM + get_task_dict = tasks.get_task_dict + evaluate = evaluator.evaluate + + class InputRecorder(eval_wrapper): + """ + This is a fake evaluation wrapper from the lm_eval library that just records the inputs + so that they can be used in calibration. + + If pad_calibration_inputs is enabled, the input recorder will take + each input and pad/truncate it down to the calibration_seq_length. + (if using padding you should set the embeddings for the pad_token to 0 + in the model) + + Note: after padding/truncation, input_prep_function is called to bring + it to the proper form to be inserted into a given model. + + If not, it will only truncate inputs to the desired length. + """ + + def __init__( + self, + tokenizer, + calibration_seq_length, + input_prep_func=None, + pad_calibration_inputs=False, + vocab_size=32000, + pad_token=0, + device="cpu", + ): + super().__init__() + self._tokenizer = tokenizer + self._device = torch.device(device) + self.vocab_size = vocab_size + self._max_seq_length = calibration_seq_length + self.calibration_seq_length = calibration_seq_length + + # need to take inps and convert to corrent input + # for model + self.input_prep_func = ( + input_prep_func if input_prep_func is not None + else lambda x: (x,) + ) + + self.pad_calibration_inputs = pad_calibration_inputs + self.pad_token = pad_token + + self.inputs = None + + @property + def eot_token_id(self): + try: + return self._tokenizer.eos_id() + except: + return self._tokenizer.eos_id + + @property + def max_length(self): + return self._max_seq_length + + @property + def max_gen_toks(self): + return 50 + + @property + def batch_size(self): + return 1 + + @property + def device(self): + return self._device + + def tok_encode(self, string: str, **kwargs): + # TODO: verify this for multi-batch as well + tokens = self._tokenizer.encode(string) + if hasattr(self._tokenizer, "bos_id"): + try: + tokens = [self._tokenizer.bos_id()] + tokens + except: + tokens = [self._tokenizer.bos_id] + tokens + return tokens + + def tok_decode(self, tokens): + decoded = self._tokenizer.decode(tokens) + return decoded + + def add_input(self, args): + if self.inputs is None: + self.inputs = [_MultiInput([arg]) for arg in args] + else: + self.inputs = [ + multi.add_input(arg) for (multi, arg) in zip(self.inputs, args) + ] + + def record_inputs( + self, + calibration_tasks, + calibration_limit, + ): + try: + lm_eval.tasks.initialize_tasks() + except: + pass + + task_dict = get_task_dict(calibration_tasks) + print("Obtaining GPTQ calibration inputs on: ", calibration_tasks) + + evaluate( + self, + task_dict, + limit=calibration_limit, + ) + return self + + def get_inputs(self): + return self.inputs + + def _model_call(self, inps): + inps = inps.squeeze(0) + T = len(inps) + if ( + # can't use inputs that 
are too short when padding disabled + (T < self.calibration_seq_length and not self.pad_calibration_inputs) + or + # can't use inputs that actually use token we use for padding + (self.pad_calibration_inputs and self.pad_token in inps) + ): + # give random output + return torch.randn( + (1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device + ) + + # pad or truncate to the right size + if T >= self.calibration_seq_length: + inps = inps[: self.calibration_seq_length] + else: + inps = F.pad(inps, (self.pad_token, self.calibration_seq_length - T)) + + inps = inps.unsqueeze(0) + model_in = self.input_prep_func(inps) + + self.add_input(model_in) + + # output `something` with correct shape to keep eval going + return torch.randn( + (1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device + ) + + def _model_generate(self, context, max_length, eos_token_id): + raise Exception("unimplemented") + + class TransformerEvalWrapper(InputRecorder): + """ + A wrapper class for GPTFast, providing integration with the lm-evaluation-harness library. + """ + def __init__( + self, + model, + tokenizer, + max_seq_length, + input_prep_func=None, + device="cuda" + ): + super().__init__(None, None) + self._model = model + self._tokenizer = tokenizer + self._device = torch.device(device) + self._max_seq_length = max_seq_length + + # need to take inps and convert to corrent input + # for model + self.input_prep_func = ( + input_prep_func if input_prep_func is not None + else lambda x: (x,) + ) + + def _model_call(self, inps): + # TODO: make batches work + input = self.input_prep_func(inps) + + max_seq_length = min(inps.size(1), self.max_length) + with torch.device(self._device): + self._model.setup_caches(self.batch_size, max_seq_length) + logits = self._model(*input) + return logits + + def _model_generate(self, context, max_length, eos_token_id): + raise Exception('unimplemented') + + def run_eval(self, tasks, limit): + try: + lm_eval.tasks.initialize_tasks() + except: + pass + + task_dict = get_task_dict(tasks) + print("Evaluating Model On: ", task_dict) + with torch.no_grad(): + result = evaluate( + self, + task_dict, + limit=limit, + ) + for task, res in result["results"].items(): + print(f"{task}: {res}") + return result diff --git a/torchao/quantization/GPTQ.py b/torchao/quantization/GPTQ.py index e7176b4fd2..f0c16f86d9 100644 --- a/torchao/quantization/GPTQ.py +++ b/torchao/quantization/GPTQ.py @@ -19,7 +19,12 @@ from torch.utils._pytree import tree_flatten, tree_unflatten -from .utils import TORCH_VERSION_AFTER_2_3, find_multiple +from .utils import ( + _lm_eval_available, + _MultiInput, + TORCH_VERSION_AFTER_2_3, + find_multiple, +) from typing import Any, Dict, Optional from .unified import Quantizer @@ -32,266 +37,20 @@ ) aten = torch.ops.aten -## eval.py ## - -try: - import lm_eval # pyre-ignore[21] # noqa: F401 - - lm_eval_available = True -except: - lm_eval_available = False - -if lm_eval_available: - try: # lm_eval version 0.4 - from lm_eval.evaluator import evaluate # pyre-ignore[21] - from lm_eval.models.huggingface import HFLM as eval_wrapper # pyre-ignore[21] - from lm_eval.tasks import get_task_dict # pyre-ignore[21] - except: # lm_eval version 0.3 - from lm_eval import base, evaluator, tasks - - eval_wrapper = base.BaseLM - get_task_dict = tasks.get_task_dict - evaluate = evaluator.evaluate -else: +if not _lm_eval_available: logging.info("lm_eval is not installed, GPTQ may not be usable") add_ons = [] -if lm_eval_available: - add_ons += ["InputRecorder", 
"TransformerEvalWrapper"] - if TORCH_VERSION_AFTER_2_3: add_ons += ["Int8DynActInt4WeightQuantizer", "Int8DynActInt4WeightGPTQQuantizer"] __all__ = [ - "MultiInput", "Int4WeightOnlyGPTQQuantizer", "Int4WeightOnlyQuantizer", ] + add_ons -if lm_eval_available: - class InputRecorder(eval_wrapper): - """ - This is a fake evaluation wrapper from the lm_eval library that just records the inputs - so that they can be used in calibration. - - If pad_calibration_inputs is enabled, the input recorder will take - each input and pad/truncate it down to the calibration_seq_length. - (if using padding you should set the embeddings for the pad_token to 0 - in the model) - - Note: after padding/truncation, input_prep_function is called to bring - it to the proper form to be inserted into a given model. - - If not, it will only truncate inputs to the desired length. - """ - - def __init__( - self, - tokenizer, - calibration_seq_length, - input_prep_func=None, - pad_calibration_inputs=False, - vocab_size=32000, - pad_token=0, - device="cpu", - ): - super().__init__() - self._tokenizer = tokenizer - self._device = torch.device(device) - self.vocab_size = vocab_size - self._max_seq_length = calibration_seq_length - self.calibration_seq_length = calibration_seq_length - - # need to take inps and convert to corrent input - # for model - self.input_prep_func = ( - input_prep_func if input_prep_func is not None - else lambda x: (x,) - ) - - self.pad_calibration_inputs = pad_calibration_inputs - self.pad_token = pad_token - - self.inputs = None - - @property - def eot_token_id(self): - try: - return self._tokenizer.eos_id() - except: - return self._tokenizer.eos_id - - @property - def max_length(self): - return self._max_seq_length - - @property - def max_gen_toks(self): - return 50 - - @property - def batch_size(self): - return 1 - - @property - def device(self): - return self._device - - def tok_encode(self, string: str, **kwargs): - # TODO: verify this for multi-batch as well - tokens = self._tokenizer.encode(string) - if hasattr(self._tokenizer, "bos_id"): - try: - tokens = [self._tokenizer.bos_id()] + tokens - except: - tokens = [self._tokenizer.bos_id] + tokens - return tokens - - def tok_decode(self, tokens): - decoded = self._tokenizer.decode(tokens) - return decoded - - def add_input(self, args): - if self.inputs is None: - self.inputs = [MultiInput([arg]) for arg in args] - else: - self.inputs = [ - multi.add_input(arg) for (multi, arg) in zip(self.inputs, args) - ] - - def record_inputs( - self, - calibration_tasks, - calibration_limit, - ): - try: - lm_eval.tasks.initialize_tasks() - except: - pass - - task_dict = get_task_dict(calibration_tasks) - print("Obtaining GPTQ calibration inputs on: ", calibration_tasks) - - evaluate( - self, - task_dict, - limit=calibration_limit, - ) - return self - - def get_inputs(self): - return self.inputs - - def _model_call(self, inps): - inps = inps.squeeze(0) - T = len(inps) - if ( - # can't use inputs that are too short when padding disabled - (T < self.calibration_seq_length and not self.pad_calibration_inputs) - or - # can't use inputs that actually use token we use for padding - (self.pad_calibration_inputs and self.pad_token in inps) - ): - # give random output - return torch.randn( - (1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device - ) - - # pad or truncate to the right size - if T >= self.calibration_seq_length: - inps = inps[: self.calibration_seq_length] - else: - inps = F.pad(inps, (self.pad_token, self.calibration_seq_length - T)) - - 
inps = inps.unsqueeze(0) - model_in = self.input_prep_func(inps) - - self.add_input(model_in) - - # output `something` with correct shape to keep eval going - return torch.randn( - (1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device - ) - - def _model_generate(self, context, max_length, eos_token_id): - raise Exception("unimplemented") - - class TransformerEvalWrapper(InputRecorder): - """ - A wrapper class for GPTFast, providing integration with the lm-evaluation-harness library. - """ - def __init__( - self, - model, - tokenizer, - max_seq_length, - input_prep_func=None, - device="cuda" - ): - super().__init__(None, None) - self._model = model - self._tokenizer = tokenizer - self._device = torch.device(device) - self._max_seq_length = max_seq_length - - # need to take inps and convert to corrent input - # for model - self.input_prep_func = ( - input_prep_func if input_prep_func is not None - else lambda x: (x,) - ) - - def _model_call(self, inps): - # TODO: make batches work - input = self.input_prep_func(inps) - - max_seq_length = min(inps.size(1), self.max_length) - with torch.device(self._device): - self._model.setup_caches(self.batch_size, max_seq_length) - logits = self._model(*input) - return logits - - def _model_generate(self, context, max_length, eos_token_id): - raise Exception('unimplemented') - - def run_eval(self, tasks, limit): - try: - lm_eval.tasks.initialize_tasks() - except: - pass - - task_dict = get_task_dict(tasks) - print("Evaluating Model On: ", task_dict) - with torch.no_grad(): - result = evaluate( - self, - task_dict, - limit=limit, - ) - for task, res in result["results"].items(): - print(f"{task}: {res}") - return result - -class MultiInput: - - def __init__(self, inputs): - - self.values = list(inputs) - - def add_input(self, input): - self.values.append(input) - return self - - def __getitem__(self, slice): - return MultiInput(self.values[slice]) - - def cuda(self): - self.values = [ - val.cuda() if isinstance(val, torch.Tensor) else val for val in self.values - ] - class GenericGPTQRunner(fx.Interpreter): """ @@ -308,7 +67,7 @@ class GenericGPTQRunner(fx.Interpreter): def __init__( self, model, - inputs: MultiInput, + inputs: _MultiInput, blocksize=128, percdamp=0.01, groupsize=128, @@ -407,22 +166,22 @@ def tensors_to_cuda(args): # flatten args and kwargs together flat_args, spec = tree_flatten((args, kwargs)) - # move all single tensors to cuda, will move MultiInputs to cuda one at a time + # move all single tensors to cuda, will move _MultiInputs to cuda one at a time flat_args = tensors_to_cuda(flat_args) - has_multi_input = MultiInput in [type(x) for x in flat_args] + has_multi_input = _MultiInput in [type(x) for x in flat_args] if has_multi_input: # Just some trickery to convert - # [MultiInput[a, a, a], MultiInput(b, b, b)] => [a, b], [a, b], [a, b] + # [_MultiInput[a, a, a], _MultiInput(b, b, b)] => [a, b], [a, b], [a, b] multi_input_count = max( - [len(x.values) if isinstance(x, MultiInput) else 1 for x in flat_args] + [len(x.values) if isinstance(x, _MultiInput) else 1 for x in flat_args] ) transposed_args = list( zip( *[ ( x.values - if isinstance(x, MultiInput) + if isinstance(x, _MultiInput) else [x] * multi_input_count ) for x in flat_args @@ -551,7 +310,7 @@ def SQNR(x, y): ) return new_out - return MultiInput(outputs) if has_multi_input else outputs[0] + return _MultiInput(outputs) if has_multi_input else outputs[0] def faster_quant(self, H, W): percdamp = self.percdamp @@ -751,7 +510,7 @@ def _convert_for_runtime(self, model: 
torch.nn.Module) -> "nn.Module": raise NotImplementedError("_convert_for_runtime not implemented") @torch.no_grad() - def quantize(self, model: torch.nn.Module, inputs: List[MultiInput], **kwargs: Any) -> torch.nn.Module: + def quantize(self, model: torch.nn.Module, inputs: List[_MultiInput], **kwargs: Any) -> torch.nn.Module: pass def _check_linear_int4_k(k, groupsize = 1, inner_k_tiles = None): @@ -981,7 +740,7 @@ def _convert_for_runtime(self, model): ) return model - def quantize(self, model: torch.nn.Module, inputs: List[MultiInput], **kwargs: Any) -> torch.nn.Module: + def quantize(self, model: torch.nn.Module, inputs: List[_MultiInput], **kwargs: Any) -> torch.nn.Module: state_dict = self._create_quantized_state_dict( model, inputs, @@ -1327,7 +1086,7 @@ def _convert_for_runtime(self, model): ) return model - def quantize(self, model: torch.nn.Module, inputs: List[MultiInput], **kwargs: Any) -> torch.nn.Module: + def quantize(self, model: torch.nn.Module, inputs: List[_MultiInput], **kwargs: Any) -> torch.nn.Module: state_dict = self._create_quantized_state_dict( model, inputs, diff --git a/torchao/quantization/utils.py b/torchao/quantization/utils.py index 74cb7deb20..78a76863f3 100644 --- a/torchao/quantization/utils.py +++ b/torchao/quantization/utils.py @@ -22,6 +22,13 @@ "TORCH_VERSION_AFTER_2_3", ] +try: + import lm_eval # pyre-ignore[21] # noqa: F401 + + _lm_eval_available = True +except: + _lm_eval_available = False + def find_multiple(n: int, *args: Tuple[int]) -> int: k: int = reduce(lambda x, y: x * y // gcd(x, y), args + (1,)) # type: ignore[9] @@ -146,6 +153,26 @@ def get_model_size_in_bytes(model): s += b.nelement() * b.element_size() return s + +class _MultiInput: + + def __init__(self, inputs): + + self.values = list(inputs) + + def add_input(self, input): + self.values.append(input) + return self + + def __getitem__(self, slice): + return _MultiInput(self.values[slice]) + + def cuda(self): + self.values = [ + val.cuda() if isinstance(val, torch.Tensor) else val for val in self.values + ] + + # TODO: quantization namespace is not the right place ot have this if version.parse(torch.__version__) >= version.parse("2.4.0.dev"): TORCH_VERSION_AFTER_2_4 = True From a7483f25e2425cd6a3b163174dbd90d0201d4d6f Mon Sep 17 00:00:00 2001 From: Vasiliy Kuznetsov Date: Tue, 28 May 2024 11:01:24 -0700 Subject: [PATCH 60/61] Add a prototype of MX format training and inference (#264) Summary: The MX numerical formats are new low precision formats with recent acceptance into the OCP spec: https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf This PR adds a reference native PyTorch implementation of training and inference primitives for using MX accelerated matrix multiplications. Currently, we use a reference layout (scale and raw data stored separately) and an emulated matrix multiplication. 
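For intuition, the core MX idea can be sketched in a few lines of plain PyTorch: quantize in fixed-size blocks with a shared power-of-two scale, and emulate the matmul by dequantizing first. The helper names below are illustrative, not the torchao API:

```
import torch

def mx_quantize_sketch(x: torch.Tensor, block_size: int = 32):
    # Illustrative MX-style quantization: each block of `block_size` elements
    # shares one power-of-two scale. Assumes x.numel() is divisible by block_size;
    # float8_e4m3fn stands in for the MX element dtype.
    blocks = x.reshape(-1, block_size)
    amax = blocks.abs().amax(dim=1, keepdim=True).clamp(min=2.0 ** -126)
    scales = torch.exp2(torch.floor(torch.log2(amax)))  # power-of-two scales
    elems = (blocks / scales).to(torch.float8_e4m3fn)
    return elems, scales

def mx_mm_emulated(a, a_scales, b, b_scales, a_shape, b_shape):
    # Emulated MX matmul: dequantize both operands to float32, then use a regular mm.
    a_hp = (a.to(torch.float32) * a_scales).reshape(a_shape)
    b_hp = (b.to(torch.float32) * b_scales).reshape(b_shape)
    return a_hp @ b_hp
```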
Test Plan: ``` // tests pytest -s test/prototype/mx_formats/* // benchmarks python torchao/prototype/mx_formats/benchmarks/bench_qdq.py ``` Reviewers: Subscribers: Tasks: Tags: --- README.md | 1 + dev-requirements.txt | 2 + test/prototype/mx_formats/test_custom_cast.py | 388 ++++++++++ test/prototype/mx_formats/test_mx_linear.py | 210 ++++++ test/prototype/mx_formats/test_mx_tensor.py | 265 +++++++ torchao/prototype/mx_formats/README.md | 102 +++ torchao/prototype/mx_formats/__init__.py | 0 .../mx_formats/benchmarks/bench_qdq.py | 148 ++++ torchao/prototype/mx_formats/config.py | 2 + torchao/prototype/mx_formats/constants.py | 51 ++ torchao/prototype/mx_formats/custom_cast.py | 713 ++++++++++++++++++ .../prototype/mx_formats/fp_format_spec.py | 550 ++++++++++++++ torchao/prototype/mx_formats/mx_linear.py | 160 ++++ torchao/prototype/mx_formats/mx_ops.py | 158 ++++ torchao/prototype/mx_formats/mx_tensor.py | 416 ++++++++++ torchao/utils.py | 15 + 16 files changed, 3181 insertions(+) create mode 100644 test/prototype/mx_formats/test_custom_cast.py create mode 100644 test/prototype/mx_formats/test_mx_linear.py create mode 100644 test/prototype/mx_formats/test_mx_tensor.py create mode 100644 torchao/prototype/mx_formats/README.md create mode 100644 torchao/prototype/mx_formats/__init__.py create mode 100644 torchao/prototype/mx_formats/benchmarks/bench_qdq.py create mode 100644 torchao/prototype/mx_formats/config.py create mode 100644 torchao/prototype/mx_formats/constants.py create mode 100644 torchao/prototype/mx_formats/custom_cast.py create mode 100644 torchao/prototype/mx_formats/fp_format_spec.py create mode 100644 torchao/prototype/mx_formats/mx_linear.py create mode 100644 torchao/prototype/mx_formats/mx_ops.py create mode 100644 torchao/prototype/mx_formats/mx_tensor.py diff --git a/README.md b/README.md index 2328c67c8b..a573df54c8 100644 --- a/README.md +++ b/README.md @@ -113,6 +113,7 @@ To learn more try out our APIs, you can check out API examples in 3. Support for lower precision [dtypes](./torchao/dtypes) such as - [nf4](https://github.com/pytorch/ao/blob/main/torchao/dtypes/nf4tensor.py) which was used to [implement QLoRA](https://github.com/pytorch/torchtune/blob/main/docs/source/tutorials/qlora_finetune.rst) without writing custom Triton or CUDA code - [uint4](https://github.com/pytorch/ao/blob/main/torchao/dtypes/uint4.py) + - [MX](https://github.com/pytorch/ao/blob/main/torchao/prototype/mx_formats) implementing training and inference support with tensors using the [OCP MX spec](https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf) data types, which can be described as groupwise scaled float8/float6/float4/int8, with the scales being constrained to powers of two. This work is prototype as the hardware support is not available yet. 4. 
[Bleeding Edge Kernels](./torchao/prototype/) for experimental kernels without backwards compatibility guarantees - [GaLore](https://github.com/pytorch/ao/tree/main/torchao/prototype/galore) for memory efficient finetuning - [fused HQQ Gemm Kernel](https://github.com/pytorch/ao/tree/main/torchao/prototype/hqq) for compute bound workloads diff --git a/dev-requirements.txt b/dev-requirements.txt index 156e8766d2..4d61858747 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -9,6 +9,8 @@ transformers bitsandbytes #needed for testing triton quant / dequant ops for 8-bit optimizers matplotlib pandas +fire # QOL for commandline scripts +tabulate # QOL for printing tables to stdout # Custom CUDA Extensions ninja diff --git a/test/prototype/mx_formats/test_custom_cast.py b/test/prototype/mx_formats/test_custom_cast.py new file mode 100644 index 0000000000..892d5b57f7 --- /dev/null +++ b/test/prototype/mx_formats/test_custom_cast.py @@ -0,0 +1,388 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import pytest + +import torch + +import torchao.prototype.mx_formats.config as config +from torch.utils._triton import has_triton +from torchao.prototype.mx_formats.constants import ( + DTYPE_FP4, + DTYPE_FP6_E2M3, + DTYPE_FP6_E3M2, + F4_E2M1_EXP_BIAS, + F6_E2M3_EXP_BIAS, + F6_E3M2_EXP_BIAS, +) + +from torchao.prototype.mx_formats.custom_cast import ( + f32_to_f4_unpacked, + f32_to_f6_e2m3_unpacked, + f32_to_f6_e3m2_unpacked, + f4_unpacked_to_f32, + f6_e2m3_unpacked_to_f32, + f6_e3m2_unpacked_to_f32, + get_bits, + pack_uint4, + triton_f4_to_bf16, + unpack_uint4, +) + +from torchao.prototype.mx_formats.fp_format_spec import ( + _assert_equals, + dtype_to_interesting_values, + float4_e2m1_interesting_values, + float6_e2m3_interesting_values, + float6_e3m2_interesting_values, + get_sem_bits, + sem_bits_to_sem_vals, + sem_vals_to_f32, +) + +from torchao.prototype.mx_formats.mx_tensor import MXTensor +from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 + +if not TORCH_VERSION_AFTER_2_4: + pytest.skip("Unsupported PyTorch version", allow_module_level=True) + +torch.manual_seed(0) + + +@pytest.mark.skip( + reason="TODO debug CI failure, low pri since this is not used in the MX code" # noqa: E501 +) +def test_fp32(): + dtype = torch.float + interesting_values = dtype_to_interesting_values[dtype] + for fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, _notes in interesting_values: + _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype) + + +@pytest.mark.skip( + reason="TODO debug CI failure, low pri since this is not used in the MX code" # noqa: E501 +) +def test_bf16(): + dtype = torch.bfloat16 + interesting_values = dtype_to_interesting_values[dtype] + for fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, _notes in interesting_values: + _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype) + + +def test_fp16(): + dtype = torch.float16 + interesting_values = dtype_to_interesting_values[dtype] + for fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, _notes in interesting_values: + _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype) + + +def test_float8_e4m3fn(): + dtype = torch.float8_e4m3fn + interesting_values = dtype_to_interesting_values[dtype] + for fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, _notes in interesting_values: + _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype) + + +def test_float8_e5m2(): + dtype = 
torch.float8_e5m2 + interesting_values = dtype_to_interesting_values[dtype] + for fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, _notes in interesting_values: + _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype) + + +def _sem_enc_to_fp32_val(s_enc, e_enc, m_enc, is_zero, is_denorm, exp_bias): + s_i = 1.0 if s_enc == "0" else -1.0 + if is_zero: + e_i = 0 + m_f = 0.0 + elif is_denorm: + e_i = int(e_enc, 2) - exp_bias + 1 + m_f = 0.0 + cur_pow_of_two = -1 + for m_bit in m_enc: + m_f += int(m_bit, 2) * pow(2, cur_pow_of_two) + cur_pow_of_two -= 1 + else: + e_i = int(e_enc, 2) - exp_bias + m_f = 1.0 + cur_pow_of_two = -1 + for m_bit in m_enc: + m_f += int(m_bit, 2) * pow(2, cur_pow_of_two) + cur_pow_of_two -= 1 + fp32 = s_i * (2**e_i) * m_f + return fp32 + + +def test_float4_e2m1_table(): + for ( + fp32_ref, + _formula, + s_enc, + e_enc, + m_enc, + _label, + ) in float4_e2m1_interesting_values: + is_zero = e_enc == "00" and m_enc == "0" + # normal vs denormal + is_denorm = e_enc == "00" and m_enc == "1" + # get exponent and mantissa + exp_bias = F4_E2M1_EXP_BIAS + fp32 = _sem_enc_to_fp32_val( + s_enc, e_enc, m_enc, is_zero, is_denorm, exp_bias + ) # noqa: E501 + assert abs(fp32_ref - fp32) < 1e-12 + + +def test_float6_e3m2_table(): + for ( + fp32_ref, + _formula, + s_enc, + e_enc, + m_enc, + _label, + ) in float6_e3m2_interesting_values: + is_zero = e_enc == "000" and m_enc == "00" + # normal vs denormal + is_denorm = e_enc == "000" and m_enc != "00" + # get exponent and mantissa + exp_bias = F6_E3M2_EXP_BIAS + fp32 = _sem_enc_to_fp32_val( + s_enc, e_enc, m_enc, is_zero, is_denorm, exp_bias + ) # noqa: E501 + assert abs(fp32_ref - fp32) < 1e-12 + + +def test_float6_e2m3_table(): + for ( + fp32_ref, + _formula, + s_enc, + e_enc, + m_enc, + _label, + ) in float6_e2m3_interesting_values: + is_zero = e_enc == "00" and m_enc == "000" + # normal vs denormal + is_denorm = e_enc == "00" and m_enc != "000" + # get exponent and mantissa + exp_bias = F6_E2M3_EXP_BIAS + fp32 = _sem_enc_to_fp32_val( + s_enc, e_enc, m_enc, is_zero, is_denorm, exp_bias + ) # noqa: E501 + assert abs(fp32_ref - fp32) < 1e-12 + + +# positive float4 vals, in increasing order: +# 0: 0 +# 1: 0.5 +# 2: 1.0 +# 3: 1.5 +# 4: 2.0 +# 5: 3.0 +# 6: 4.0 +# 7: 6.0 +# below we test pos and neg versions of all of these + + +def _test_fp4_case(f32_val, f32_val_ref, f4_enc_ref): + # 1. verify that a fp32 value gets quantized to correct fp4 encoding + # TODO test on cuda + f4_unpacked = f32_to_f4_unpacked(torch.tensor(f32_val)) + s_enc, e_enc, m_enc = get_sem_bits(f4_unpacked, bitwidth=4) + assert s_enc + e_enc + m_enc == f4_enc_ref + + # 2. verify that fp4 value gets dequantized to correct fp32 value + f32_dequantized = f4_unpacked_to_f32(f4_unpacked) + assert f32_val_ref == f32_dequantized.item() + + +def _test_fp4_cases(cases): + # test the exp and mantissa with both values of the sign bit + for s_enc in "0", "1": + s_i = 1.0 if s_enc == "0" else -1.0 + for val, val_ref, em_enc in cases: + _test_fp4_case(s_i * val, s_i * val_ref, s_enc + em_enc) + + +# note: below are written as individual test cases for easy command line +# filtering with pytest, i.e. "-k fp4_0_0" + +# Explanation of tie-to-even test cases: +# 1. read https://stackoverflow.com/q/8981913/ +# From above, tie-to-even rule: if GRS == 100, round up if bit before is a 1, # noqa: E501 +# and round down if it's a 0 +# +# 2. assume 1.mm...m for normals and 0.mm...m for denormals. Since +# fp4 has only one mantissa bit we are always rounding after that bit. 
So, +# G == 0 for fp4 denormal range, and G == 1 for fp4 normal range. +# +# 3. Therefore, when we have a tie (GRS == 100), we round down for fp4 denormals # noqa: E501 +# and round up for fp4 normals: +# 0.25 -> 0.0 (the only denormal case) +# 0.75 -> 1.0 +# 1.25 -> 1.0 +# 1.75 -> 2.0 +# 2.5 -> 2.0 +# 3.5 -> 4.0 +# 5.0 -> 4.0 + + +def test_fp4_0_0(): + cases = [ + (0.25, 0.0, "000"), # tie to even + (0.1, 0.0, "000"), + (0.0, 0.0, "000"), + # note: -0.1 is tested in the negative zero test + ] + _test_fp4_cases(cases) + + +def test_fp4_0_5(): + cases = [ + (0.6, 0.5, "001"), + (0.5, 0.5, "001"), + (0.4, 0.5, "001"), + ] + _test_fp4_cases(cases) + + +def test_fp4_1_0(): + cases = [ + (1.25, 1.0, "010"), # tie to even + (1.1, 1.0, "010"), + (1.0, 1.0, "010"), + (0.9, 1.0, "010"), + (0.75, 1.0, "010"), # tie to even + ] + _test_fp4_cases(cases) + + +def test_fp4_1_5(): + cases = [ + (1.6, 1.5, "011"), + (1.5, 1.5, "011"), + (1.4, 1.5, "011"), + ] + _test_fp4_cases(cases) + + +def test_fp4_2_0(): + cases = [ + (2.5, 2.0, "100"), # tie to even + (2.1, 2.0, "100"), + (2.0, 2.0, "100"), + (1.9, 2.0, "100"), + (1.75, 2.0, "100"), # tie to even + ] + _test_fp4_cases(cases) + + +def test_fp4_3_0(): + cases = [ + (3.1, 3.0, "101"), + (3.0, 3.0, "101"), + (2.9, 3.0, "101"), + ] + _test_fp4_cases(cases) + + +def test_fp4_4_0(): + cases = [ + (5.0, 4.0, "110"), # tie to even + (4.1, 4.0, "110"), + (4.0, 4.0, "110"), + (3.9, 4.0, "110"), + (3.5, 4.0, "110"), # tie to even + ] + _test_fp4_cases(cases) + + +def test_fp4_6_0(): + cases = [ + (6.1, 6.0, "111"), + (6.0, 6.0, "111"), + (5.9, 6.0, "111"), + ] + _test_fp4_cases(cases) + + +def test_fp4_pack_unpack(): + orig_vals = torch.Tensor([[0.0, 0.5, 4.0, -0.0], [-0.0, 1.0, -6.0, 3.0]]) + orig_vals_f4_unpacked = f32_to_f4_unpacked(orig_vals) + orig_vals_f4_packed = pack_uint4(orig_vals_f4_unpacked) + assert orig_vals_f4_packed.numel() == (orig_vals.numel() / 2) + orig_vals_f4_packed_unpacked = unpack_uint4(orig_vals_f4_packed) + orig_vals_dq = f4_unpacked_to_f32(orig_vals_f4_packed_unpacked) + assert torch.all(orig_vals_dq == orig_vals) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_triton(), reason="unsupported without triton") +def test_fp4_triton_unscaled_cast(): + packed_vals = torch.arange(0, 255, dtype=torch.uint8, device="cuda") + f32_ref = f4_unpacked_to_f32(unpack_uint4(packed_vals)) + f32_triton = triton_f4_to_bf16(packed_vals).to(torch.float) + assert torch.all(torch.eq(f32_ref, f32_triton)) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.skipif(not has_triton(), reason="unsupported without triton") +def test_fp4_triton_scaled_cast(): + size = (256,) + orig_vals = torch.randn(size, dtype=torch.float, device="cuda") * 100 + mxtensor = MXTensor.to_mx(orig_vals, block_size=32, elem_dtype=DTYPE_FP4) + + f32_ref = mxtensor.to_dtype(torch.float) + config.use_fp4_custom_triton_dequant_kernel = True + f32_triton = mxtensor.to_dtype(torch.float) + config.use_fp4_custom_triton_dequant_kernel = False + assert torch.all(torch.eq(f32_ref, f32_triton)) + + +@pytest.mark.parametrize("dtype_name", (DTYPE_FP6_E2M3, DTYPE_FP6_E3M2)) +def test_fp6_values(dtype_name): + """ + The fp6 dtypes have 2**6 = 64 unique values each. The test + below tests the f32 -> f6 and f6 -> f32 cast for each value. + + TODO(future PR): also verify rounding tie-to-even works properly. 
+ """ + + for i in range(2**6): + t = torch.tensor(i, dtype=torch.uint8) + bits = get_bits(t.to(torch.int8)) + + # go from bits to f32 ref + if dtype_name == DTYPE_FP6_E2M3: + s_enc, e_enc, m_enc = bits[2], bits[3:5], bits[5:] + elif dtype_name == DTYPE_FP6_E3M2: + s_enc, e_enc, m_enc = bits[2], bits[3:6], bits[6:] + else: + raise AssertionError("unsupported") + s_i, e_i, m_f, special_value = sem_bits_to_sem_vals( + s_enc, e_enc, m_enc, dtype_name + ) + f32_ref = torch.tensor(sem_vals_to_f32(s_i, e_i, m_f, special_value)) + + # test cast to f6 + if dtype_name == DTYPE_FP6_E2M3: + f6 = f32_to_f6_e2m3_unpacked(f32_ref) + elif dtype_name == DTYPE_FP6_E3M2: + f6 = f32_to_f6_e3m2_unpacked(f32_ref) + else: + raise AssertionError("unsupported") + # test that the bits are equivalent to our starting point + torch.testing.assert_close(f6, t, rtol=0, atol=0) + + # test cast back to f32 + if dtype_name == DTYPE_FP6_E2M3: + f32 = f6_e2m3_unpacked_to_f32(f6) + elif dtype_name == DTYPE_FP6_E3M2: + f32 = f6_e3m2_unpacked_to_f32(f6) + else: + raise AssertionError("unsupported") + torch.testing.assert_close(f32, f32_ref, rtol=0, atol=0) diff --git a/test/prototype/mx_formats/test_mx_linear.py b/test/prototype/mx_formats/test_mx_linear.py new file mode 100644 index 0000000000..65f6002dbf --- /dev/null +++ b/test/prototype/mx_formats/test_mx_linear.py @@ -0,0 +1,210 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import copy + +import pytest + +import torch +import torch.nn as nn +from torchao.prototype.mx_formats.constants import SUPPORTED_ELEM_DTYPES + +from torchao.prototype.mx_formats.mx_linear import ( + MXInferenceLinear, + MXLinear, + swap_linear_with_mx_inference_linear, + swap_linear_with_mx_linear, +) + +from torchao.quantization.utils import compute_error, TORCH_VERSION_AFTER_2_4 + +# trying to outsmart flake8 +__has_cuda = torch.cuda.is_available() +IS_CUDA_GE_89 = __has_cuda and torch.cuda.get_device_capability() >= (8, 9) + +torch.manual_seed(2) + +if not TORCH_VERSION_AFTER_2_4: + pytest.skip("Unsupported PyTorch version", allow_module_level=True) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +@pytest.mark.parametrize("bias", [True, False]) +@pytest.mark.parametrize("input_shape", [(2, 4), (1, 2, 4), (1, 1, 2, 4)]) +def test_linear_eager(elem_dtype, bias, input_shape): + """ + Smoke test for training linear module with mx weight + """ + grad_shape = list(input_shape) + grad_shape[-1] = 6 + + m = nn.Sequential( + nn.Linear(4, 6, bias=bias, device="cuda"), + ) + m_mx = copy.deepcopy(m) + block_size = 2 + swap_linear_with_mx_linear(m_mx, elem_dtype, block_size) + + x_ref = torch.randn(*input_shape, device="cuda").requires_grad_() + x = copy.deepcopy(x_ref) + g = torch.randn(*grad_shape, device="cuda") + with torch.autocast("cuda", dtype=torch.bfloat16): + y_ref = m(x_ref) + y_mx = m_mx(x) + + y_ref.backward(g) + y_mx.backward(g) + + y_sqnr = compute_error(y_ref, y_mx) + w_g_sqnr = compute_error(m[0].weight.grad, getattr(m_mx, "0").weight.grad) + x_g_sqnr = compute_error(x_ref.grad, x.grad) + + if elem_dtype is torch.float8_e4m3fn: + assert y_sqnr >= 18.0 + assert w_g_sqnr >= 18.0 + assert x_g_sqnr >= 14.0 + else: + assert y_sqnr >= 8.0 + assert w_g_sqnr >= 10.0 + assert x_g_sqnr >= 8.0 + + +# TODO(future): enable compile support 
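+# A hypothetical sketch of a compiled variant of the checkpointing test
+# below, once compile support lands (untested, assumes the same swap +
+# checkpoint pattern as the test):
+#
+#   m_c = torch.compile(m)
+#   y = torch.utils.checkpoint.checkpoint(m_c, x, use_reentrant=False)
+#   y.backward(g)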
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_activation_checkpointing(): + input_shape = (2, 4) + grad_shape = (2, 6) + elem_dtype = torch.float8_e4m3fn + + m = nn.Sequential( + nn.Linear(4, 6, bias=True, device="cuda"), + nn.Linear(6, 6, bias=True, device="cuda"), + ) + block_size = 2 + swap_linear_with_mx_linear(m, elem_dtype, block_size) + + x = torch.randn(*input_shape, device="cuda").requires_grad_() + g = torch.randn(*grad_shape, device="cuda") + y = torch.utils.checkpoint.checkpoint(m, x, use_reentrant=False) + y.backward(g) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +@pytest.mark.parametrize("bias", [False, True]) +def test_linear_compile(elem_dtype, bias): + """ + Verify that compile does not change numerics of MX linear fw + bw + """ + if elem_dtype in (torch.float8_e4m3fn, torch.float8_e5m2): + if not IS_CUDA_GE_89: + pytest.skip("CUDA capability >= 8.9 required for float8 in triton") + input_shape = (2, 4) + grad_shape = (2, 6) + m_mx = nn.Sequential( + nn.Linear(4, 6, bias=bias, device="cuda"), + ) + block_size = 2 + swap_linear_with_mx_linear(m_mx, elem_dtype, block_size) + m_mx_c = copy.deepcopy(m_mx) + m_mx_c = torch.compile(m_mx_c, fullgraph=True) + + x_ref = torch.randn(*input_shape, device="cuda").requires_grad_() + x = copy.deepcopy(x_ref) + g = torch.randn(*grad_shape, device="cuda") + + with torch.autocast("cuda", dtype=torch.bfloat16): + y_ref = m_mx(x_ref) + y = m_mx_c(x) + torch.testing.assert_close(y_ref, y, atol=0, rtol=0) + + y_ref.backward(g) + y.backward(g) + w_g_ref = m_mx[0].weight.grad + w_g = getattr(m_mx_c, "0").weight.grad + # TODO(future): investigate why we can't match with rtol=0 atol=0 + # after moving to torchao repo. Technically compile does not give + # bit exactness guarantees, but there also might be a bug lurking + # around. + torch.testing.assert_close(w_g_ref, w_g, atol=0.02, rtol=0.02) + + x_g_ref = x_ref.grad + x_g = x.grad + # TODO(future): investigate why we can't match with rtol=0 atol=0 + # after moving to torchao repo. Technically compile does not give + # bit exactness guarantees, but there also might be a bug lurking + # around. 
+    torch.testing.assert_close(x_g_ref, x_g, atol=0.02, rtol=0.02)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+@pytest.mark.parametrize("bias", [True, False])
+@pytest.mark.parametrize("input_shape", [(2, 4), (1, 2, 4), (1, 1, 2, 4)])
+def test_inference_linear(elem_dtype, bias, input_shape):
+    """
+    Smoke test for inference linear module with mx weight
+    """
+    m = nn.Sequential(nn.Linear(4, 6, bias=bias, dtype=torch.bfloat16))
+    m = m.cuda()
+    m_mx = copy.deepcopy(m)
+    block_size = 2
+    swap_linear_with_mx_inference_linear(m_mx, elem_dtype, block_size)
+
+    x = torch.randn(*input_shape, device="cuda", dtype=torch.bfloat16)
+    y_ref = m(x)
+    y_mx = m_mx(x)
+    sqnr = compute_error(y_ref, y_mx)
+    if elem_dtype is torch.float8_e4m3fn:
+        assert sqnr >= 20.0
+    else:
+        assert sqnr >= 11.0
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+def test_inference_compile_simple(elem_dtype):
+    """
+    Smoke test for inference compile
+    """
+    if elem_dtype in (torch.float8_e4m3fn, torch.float8_e5m2):
+        if not IS_CUDA_GE_89:
+            pytest.skip("CUDA capability >= 8.9 required for float8 in triton")
+    m = nn.Sequential(nn.Linear(4, 6, bias=False, dtype=torch.bfloat16))
+    m = m.cuda()
+    m_mx = copy.deepcopy(m)
+    block_size = 2
+    swap_linear_with_mx_inference_linear(m_mx, elem_dtype, block_size)
+    m_mx = torch.compile(m_mx, fullgraph=True)
+
+    x = torch.randn(2, 4, device="cuda", dtype=torch.bfloat16)
+    y_ref = m(x)
+    y_mx = m_mx(x)
+    sqnr = compute_error(y_ref, y_mx)
+    if elem_dtype is torch.float8_e4m3fn:
+        assert sqnr >= 20.0
+    else:
+        assert sqnr >= 14.0
+
+
+def test_filter_fn():
+    m1 = nn.Sequential(
+        nn.Linear(32, 32),
+        nn.Linear(32, 32),
+    )
+    m2 = copy.deepcopy(m1)
+    filter_fn = lambda mod, fqn: fqn != "1"  # noqa: E731
+
+    swap_linear_with_mx_linear(m1, torch.float8_e4m3fn, 32, filter_fn)
+    assert type(m1[0]) == MXLinear
+    assert type(m1[1]) == torch.nn.Linear
+
+    swap_linear_with_mx_inference_linear(
+        m2, torch.float8_e4m3fn, 32, filter_fn
+    )  # noqa: E501
+    assert type(m2[0]) == MXInferenceLinear
+    assert type(m2[1]) == torch.nn.Linear
diff --git a/test/prototype/mx_formats/test_mx_tensor.py b/test/prototype/mx_formats/test_mx_tensor.py
new file mode 100644
index 0000000000..f1b82e376b
--- /dev/null
+++ b/test/prototype/mx_formats/test_mx_tensor.py
@@ -0,0 +1,265 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import pytest
+
+import torch
+from torchao.prototype.mx_formats import config
+from torchao.prototype.mx_formats.constants import (
+    DTYPE_FP4,
+    DTYPE_FP6_E2M3,
+    DTYPE_FP6_E3M2,
+    SUPPORTED_ELEM_DTYPES,
+)
+
+from torchao.prototype.mx_formats.custom_cast import pack_uint4
+
+from torchao.prototype.mx_formats.mx_tensor import (
+    E8M0_EXPONENT_NAN_VAL,
+    MXTensor,
+    to_dtype,
+)
+
+from torchao.quantization.utils import compute_error, TORCH_VERSION_AFTER_2_4
+
+# trying to outsmart flake8
+__has_cuda = torch.cuda.is_available()
+IS_CUDA_GE_89 = __has_cuda and torch.cuda.get_device_capability() >= (8, 9)
+
+torch.manual_seed(2)
+
+if not TORCH_VERSION_AFTER_2_4:
+    pytest.skip("Unsupported PyTorch version", allow_module_level=True)
+
+
+@pytest.fixture(autouse=True)
+def run_before_and_after_tests():
+    # source: https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test  # noqa: E501
+
+    # setup (currently do nothing)
+
+    # tests will run here
+    yield
+
+    # teardown
+    # avoid dynamo cache limit issues
+    torch._dynamo.reset()
+
+
+def _test_mx(data_hp, elem_dtype, block_size):
+    data_mx = MXTensor.to_mx(data_hp, elem_dtype, block_size)
+    data_mx_dq = data_mx.to_dtype(data_hp.dtype)
+
+    def assert_sqnr_gt_threshold(orig, new, threshold):
+        sqnr = compute_error(orig, new)
+        if torch.all(torch.isnan(sqnr)):
+            # if both operands are full of zeroes, sqnr is nan and this is ok
+            # test for this explicitly
+            assert torch.all(orig == 0) and torch.all(new == 0)
+        else:
+            assert sqnr >= threshold
+
+    if elem_dtype is torch.float8_e4m3fn:
+        assert_sqnr_gt_threshold(data_hp, data_mx_dq, 20.0)
+    else:
+        assert_sqnr_gt_threshold(data_hp, data_mx_dq, 14.0)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+def test_hello_world(elem_dtype):
+    data = torch.randn(4, 4, device="cuda", dtype=torch.bfloat16)
+    block_size = 2
+    _test_mx(data, elem_dtype, block_size)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+def test_all_zeros(elem_dtype):
+    data = torch.zeros(4, 4, device="cuda", dtype=torch.bfloat16)
+    block_size = 2
+    _test_mx(data, elem_dtype, block_size)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+def test_some_zeros(elem_dtype):
+    data = torch.randn(4, 4, device="cuda", dtype=torch.bfloat16)
+    data[0, :] = 0.0
+    data[:, 2] = 0.0
+    block_size = 2
+    _test_mx(data, elem_dtype, block_size)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+def test_exponent_nan_in(elem_dtype):
+    """
+    If the high precision block values have a NaN, the block's
+    exponent value is set to NaN
+    """
+    tensor_hp = torch.tensor(
+        [float("nan"), 1, 2, 3, 4, 5], device="cuda", dtype=torch.bfloat16
+    )
+    block_size = 2
+    tensor_mx = MXTensor.to_mx(tensor_hp, elem_dtype, block_size)
+    assert torch.all(tensor_mx._scale_e8m0[0] == E8M0_EXPONENT_NAN_VAL)
+    assert not torch.any(tensor_mx._scale_e8m0[1:] == E8M0_EXPONENT_NAN_VAL)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+def test_exponent_nan_out(elem_dtype):
+    """
+    If the block exponent value is NaN, the MX tensor block values are NaN
+    """
+    scale_e8m0_bits
= torch.tensor( + [E8M0_EXPONENT_NAN_VAL, 23, 42], dtype=torch.uint8, device="cuda" + ) + if elem_dtype in (torch.float8_e4m3fn, torch.float8_e5m2): + data_bits = torch.tensor( + [0, 1, 2, 3, 4, 5], dtype=elem_dtype, device="cuda" + ) # noqa: E501 + elif elem_dtype in (DTYPE_FP6_E2M3, DTYPE_FP6_E3M2): + data_bits = torch.tensor( + [0, 1, 2, 3, 4, 5], dtype=torch.uint8, device="cuda" + ) # noqa: E501 + elif elem_dtype == DTYPE_FP4: + data_bits = torch.tensor( + [0, 1, 2, 3, 4, 5], dtype=torch.uint8, device="cuda" + ) # noqa: E501 + data_bits = pack_uint4(data_bits) + else: + raise AssertionError("unsupported") + block_size = 2 + tensor_mx = MXTensor( + scale_e8m0_bits, data_bits, elem_dtype, block_size, torch.float + ) + tensor_hp = tensor_mx.to_dtype(torch.float) + assert torch.all(torch.isnan(tensor_hp[0:1])) + assert not torch.any(torch.isnan(tensor_hp[2:])) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +def test_ranks(elem_dtype): + """ + The reshaping logic works for various ranks + """ + B = 2 + shapes = ((B * 4,), (B * 4, 2), (B * 4, 2, 2), (B * 4, 2, 2, 2)) + for s in shapes: + tensor_hp = torch.randn(*s, device="cuda", dtype=torch.bfloat16) + _test_mx(tensor_hp, elem_dtype, B) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +def test_block_sizes(elem_dtype): + """ + Smoke test for various block sizes + """ + for B in (1, 2, 32): + if B == 1 and elem_dtype == DTYPE_FP4: + pytest.skip("unsupported configuration") + tensor_hp = torch.randn(B, device="cuda", dtype=torch.bfloat16) + _test_mx(tensor_hp, elem_dtype, B) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +@pytest.mark.parametrize("fp4_triton", [False, True]) +def test_transpose(elem_dtype, fp4_triton): + """ + Verify that transposing an MX tensor works + """ + if elem_dtype != DTYPE_FP4 and fp4_triton: + pytest.skip("unsupported configuration") + + tensor_hp = torch.randn(128, 256, device="cuda", dtype=torch.bfloat16) + block_size = 32 + tensor_mx = MXTensor.to_mx(tensor_hp, elem_dtype, block_size) + config.use_fp4_custom_triton_dequant_kernel = fp4_triton + tensor_mx_dq_t = tensor_mx.to_dtype(tensor_hp.dtype).t() + config.use_fp4_custom_triton_dequant_kernel = False + + tensor_mx_t = tensor_mx.t() + config.use_fp4_custom_triton_dequant_kernel = fp4_triton + tensor_mx_t_dq = tensor_mx_t.to_dtype(tensor_hp.dtype) + config.use_fp4_custom_triton_dequant_kernel = False + + assert tensor_mx_dq_t.shape == tensor_mx_t_dq.shape + torch.testing.assert_close(tensor_mx_dq_t, tensor_mx_t_dq, atol=0, rtol=0) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +def test_cast_autograd(elem_dtype): + x = torch.arange(8, device="cuda").bfloat16().requires_grad_() + grad = torch.arange(8, device="cuda").bfloat16() * 0.5 + block_size = 8 + x_mx = MXTensor.to_mx(x, elem_dtype, block_size) + x_dq = x_mx.to_dtype(torch.bfloat16) + x_dq.backward(gradient=grad) + torch.testing.assert_close(grad, x.grad, atol=0, rtol=0) + + +@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES) +def test_view(elem_dtype): + x = torch.randn(1, 2, 4) + block_size = 2 + x_mx = MXTensor.to_mx(x, elem_dtype, block_size) + x_mx_2 = x_mx.view(2, 4) # noqa: F841 + + 
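+# Note: a rough sketch of the per-block math that `MXTensor.to_mx` performs
+# and which the tests above exercise (names illustrative; the real code also
+# handles zeros, NaN and saturation, and stores the exponent biased in e8m0):
+#
+#   amax = x_block.abs().max()
+#   shared_exp = floor(log2(amax)) - max_pow2_of_elem_dtype
+#   data_lp = cast_to_elem_dtype(x_block / 2.0**shared_exp)
+#
+# and `to_dtype` approximately inverts it:
+#
+#   x_dq = data_lp.to(hp_dtype) * 2.0**shared_exp
+
+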
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
+@pytest.mark.parametrize("elem_dtype", SUPPORTED_ELEM_DTYPES)
+@pytest.mark.parametrize("hp_dtype", [torch.float32, torch.bfloat16])
+@pytest.mark.parametrize("all_zeros", [False, True])
+def test_to_mx_from_mx_compile_numerics(elem_dtype, hp_dtype, all_zeros):
+    """
+    Verifies that compile does not change numerics of MX casts
+    """
+    if elem_dtype in (torch.float8_e4m3fn, torch.float8_e5m2):
+        if not IS_CUDA_GE_89:
+            # separate ifs because flake8 is outsmarting me
+            pytest.skip("CUDA capability >= 8.9 required for float8 in triton")
+
+    shape = 4, 8
+    if not all_zeros:
+        x = torch.randn(*shape, dtype=hp_dtype, device="cuda")
+    else:
+        x = torch.zeros(*shape, dtype=hp_dtype, device="cuda")
+    block_size = 2
+    to_mx_c = torch.compile(MXTensor.to_mx, fullgraph=True)
+
+    x_mx = MXTensor.to_mx(x, elem_dtype, block_size)
+    x_mx_c = to_mx_c(x, elem_dtype, block_size)
+    torch.testing.assert_close(
+        x_mx._scale_e8m0,
+        x_mx_c._scale_e8m0,
+        atol=0,
+        rtol=0,
+    )
+    torch.testing.assert_close(x_mx._data, x_mx_c._data, atol=0, rtol=0)
+
+    to_dtype_c = torch.compile(to_dtype, fullgraph=True)
+
+    x_mx_dq = to_dtype(
+        x_mx._data,
+        x_mx._scale_e8m0,
+        x_mx._elem_dtype,
+        x_mx._block_size,
+        hp_dtype,  # noqa: E501
+    )
+    x_mx_c_dq = to_dtype_c(
+        x_mx_c._data,
+        x_mx_c._scale_e8m0,
+        x_mx_c._elem_dtype,
+        x_mx_c._block_size,
+        hp_dtype,
+    )
+    torch.testing.assert_close(x_mx_dq, x_mx_c_dq, atol=0, rtol=0)
diff --git a/torchao/prototype/mx_formats/README.md b/torchao/prototype/mx_formats/README.md
new file mode 100644
index 0000000000..b750c26af2
--- /dev/null
+++ b/torchao/prototype/mx_formats/README.md
@@ -0,0 +1,102 @@
+# MX formats with native PyTorch POC
+
+This is a POC of training and inference with tensors in the MX format from the OCP spec (https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf) in native PyTorch.
+
+Note that the current version of the code is written for readability and
+numerical correctness and not yet for optimal performance. We welcome
+contributions on performance improvements.
+
+Note that there are no BC guarantees at the moment and we plan to evolve
+this code as the hardware specifics of MX-accelerated matmuls become
+known.
+
+# Current status
+
+## user API (subject to change)
+
+### MXTensor
+
+This implements casts between high precision and MX formats in native PyTorch. Currently
+only `torch.float32` and `torch.bfloat16` are supported as high precision formats.
+
+```python
+from torchao.prototype.mx_formats.mx_tensor import MXTensor
+# Note: MX int8 is not implemented yet
+from torchao.prototype.mx_formats.constants import DTYPE_FP6_E2M3, DTYPE_FP6_E3M2, DTYPE_FP4
+x = torch.randn(32, 32, device='cuda')
+
+# elem_dtype can be torch.float8_e4m3fn, torch.float8_e5m2, DTYPE_FP6_E2M3, DTYPE_FP6_E3M2, DTYPE_FP4
+elem_dtype = torch.float8_e4m3fn
+
+# high precision to MX, block size defaults to 32
+x_mx = MXTensor.to_mx(x, elem_dtype)
+
+# mx back to high precision
+x_hp = x_mx.to_dtype(torch.float)
+```
+
+### MXLinear
+
+This is a module for MX training; the MX matmul is currently emulated (tensors are cast to MX and back, and the matmul itself runs in high precision).
+ +```python +from torchao.prototype.mx_formats.mx_linear import swap_linear_with_mx_linear + +m = torch.nn.Sequential(torch.nn.Linear(32, 32)).cuda() +elem_dtype = torch.float8_e4m3fn +block_size = 32 +swap_linear_with_mx_linear(m, elem_dtype, block_size) + +# training loop (not shown) +``` + +### MXInferenceLinear + +This is a module to do MX inference, weights are in MX and matmul is in high precision. + +```python +from torchao.prototype.mx_formats.mx_linear import swap_linear_with_mx_inference_linear + +m = torch.nn.Sequential(torch.nn.Linear(32, 32)).cuda() +elem_dtype = torch.float8_e4m3fn +block_size = 32 +swap_linear_with_mx_inference_linear(m, elem_dtype, block_size) + +# do inference (not shown) +``` + +## accuracy status +* we match bitwise to other implementations of the OCP MX spec (code not in this repo), with a couple of edge cases left to resolve +* approximate numerics pass for `MXLinear` and `MXInferenceLinear` on sample inputs +* LLaMa 3 8B pretraining on 4 GPUs for 500 iterations shows that loss convergence is not meaningfully degraded (code not in this repo) + +## performance status + +### quant and dequant + +* we have a benchmark of quantizing and dequantizing mxfp8 and mxfp4 tensors with size (1, 4096, 11008) +* latest numbers: https://gist.github.com/vkuzo/83656e4a74777cfc0915de6b27be1ff6 + +## testing and benchmarking + +```bash +# numerical testing of custom fp4 and fp6 casts +pytest test/prototype/mx_formats/test_custom_cast.py +# testing of MXTensor +pytest test/prototype/mx_formats/test_mx_tensor.py +# testing of MXLinear and MXInferenceLinear +pytest test/prototype/mx_formats/test_mx_linear.py + +# run the quant and dequant benchmark +python torchao/prototype/mx_formats/benchmarks/bench_qdq.py +``` + +## floating point format convenience functions + +We have a convenience script which summarizes the various properties of +floating point formats: + +```bash +python torchao/prototype/mx_formats/fp_format_spec.py +# example output: https://gist.github.com/vkuzo/b8e114aa83736f87d6618b16aa8588c0 +``` diff --git a/torchao/prototype/mx_formats/__init__.py b/torchao/prototype/mx_formats/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/torchao/prototype/mx_formats/benchmarks/bench_qdq.py b/torchao/prototype/mx_formats/benchmarks/bench_qdq.py new file mode 100644 index 0000000000..6e6e373de3 --- /dev/null +++ b/torchao/prototype/mx_formats/benchmarks/bench_qdq.py @@ -0,0 +1,148 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
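+
+# Run via the `fire` CLI, for example (the profile folder below is just an
+# example path):
+#   python torchao/prototype/mx_formats/benchmarks/bench_qdq.py
+#   python torchao/prototype/mx_formats/benchmarks/bench_qdq.py --profile_folder /tmp/mx_traces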
+ +""" +Benchmarking mx quantize/dequantize +""" + +from typing import Optional + +import fire +import tabulate +import torch + +from torch.profiler import profile, ProfilerActivity +from torchao.prototype.mx_formats import config +from torchao.prototype.mx_formats.constants import ( # noqa: E501 + DTYPE_FP4, + SUPPORTED_ELEM_DTYPES, +) + +from torchao.prototype.mx_formats.mx_tensor import MXTensor +from torchao.utils import benchmark_torch_function_in_microseconds + + +def run(profile_folder: Optional[str] = None): + headers = [ + "elem_dtype", + "use_fp4_custom_triton_dequant_kernel", + "q_time_us", + "q_mem_bw_tb_s", + "dq_time_us", + "dq_mem_bw_tb_s", + ] + results = [] + + data_hp = torch.randn(1, 4096, 11008, dtype=torch.bfloat16, device="cuda") + + for elem_dtype in SUPPORTED_ELEM_DTYPES: + for use_fp4_custom_triton_dequant_kernel in (False, True): + config.use_fp4_custom_triton_dequant_kernel = ( + use_fp4_custom_triton_dequant_kernel + ) + + if ( + elem_dtype != DTYPE_FP4 + and use_fp4_custom_triton_dequant_kernel # noqa: E501 + ): + # custom_triton_kernels only works for fp4 + continue + + print( + "elem_dtype", + elem_dtype, + "use_fp4_custom_triton_dequant_kernel", + use_fp4_custom_triton_dequant_kernel, + ) + + data_lp = MXTensor.to_mx(data_hp, elem_dtype, block_size=32) + + if not use_fp4_custom_triton_dequant_kernel: + quant = torch.compile(MXTensor.to_mx) + dequant = torch.compile(data_lp.to_dtype) + else: + # As of 2024-04, torch.compile didn't work with the + # handwritten triton kernel, + # crashed on tl.interleave: + # https://github.com/pytorch/pytorch/issues/123967 + # As of 2024-05-24, now there is message asking to convert to + # an opaque custom op: + # https://gist.github.com/vkuzo/0b0b90dca03bdb8e0446e4135644238a # noqa: E501 + # TODO(future): make this better + quant = MXTensor.to_mx + dequant = data_lp.to_dtype + + # warm up + quant(data_hp, elem_dtype, block_size=32) + res = dequant(torch.bfloat16) + + if profile_folder is not None: + with profile( + activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + record_shapes=True, + ) as prof: + for _ in range(5): + quant(data_hp, elem_dtype, block_size=32) + dequant(torch.bfloat16) + prof.export_chrome_trace( + profile_folder + + f"/mx_qdq_{elem_dtype}_{use_fp4_custom_triton_dequant_kernel}.json" # noqa: E501 + ) + + q_execution_time_us = benchmark_torch_function_in_microseconds( + quant, data_hp, elem_dtype, block_size=32 + ) + dq_execution_time_us = benchmark_torch_function_in_microseconds( + dequant, torch.bfloat16 + ) + print(f"q time: {q_execution_time_us} us") + print(f"dq time: {dq_execution_time_us} us") + + # memory reads per element: + byte_per_stored_element = 1.0 # fp8 or 2xfp4 + byte_per_stored_exp_element = 1.0 # e8m0 + byte_per_dequantized_element = 2.0 # bfloat16 + mem_reads_writes_bytes = ( + # read raw data + (data_lp._data.numel() * byte_per_stored_element) + + + # read exponent + (data_lp._scale_e8m0.numel() * byte_per_stored_exp_element) + + + # write dequant + (res.numel() * byte_per_dequantized_element) + ) + # note: the above also works for quant, with reads/writes in + # reverse + + q_mem_bw_tb_s = (mem_reads_writes_bytes / 1e12) / ( + q_execution_time_us / 1e6 + ) + dq_mem_bw_tb_s = (mem_reads_writes_bytes / 1e12) / ( + dq_execution_time_us / 1e6 + ) + print(f"q mem bw: {q_mem_bw_tb_s} TB/s") + print(f"dq mem bw: {dq_mem_bw_tb_s} TB/s") + + results.append( + ( + elem_dtype, + use_fp4_custom_triton_dequant_kernel, + q_execution_time_us, + q_mem_bw_tb_s, + dq_execution_time_us, + 
dq_mem_bw_tb_s, + ) + ) + config.use_fp4_custom_triton_dequant_kernel = False + + torch._dynamo.reset() + + print(tabulate.tabulate(results, headers=headers, floatfmt=".2f")) + + +if __name__ == "__main__": + fire.Fire(run) diff --git a/torchao/prototype/mx_formats/config.py b/torchao/prototype/mx_formats/config.py new file mode 100644 index 0000000000..3e7e03d8f6 --- /dev/null +++ b/torchao/prototype/mx_formats/config.py @@ -0,0 +1,2 @@ +# If True, uses a custom triton kernel for fp4 dequantize +use_fp4_custom_triton_dequant_kernel = False diff --git a/torchao/prototype/mx_formats/constants.py b/torchao/prototype/mx_formats/constants.py new file mode 100644 index 0000000000..402bf24a09 --- /dev/null +++ b/torchao/prototype/mx_formats/constants.py @@ -0,0 +1,51 @@ +import torch + +# This is conceptually an enum of non-core dtypes +# TODO(future PR): change to a cleaner way to represent this without +# regressing torch.compile and while keeping things readable. +DTYPE_FP4 = "fp4_e2m1" +DTYPE_FP6_E3M2 = "fp6_e3m2" +DTYPE_FP6_E2M3 = "fp6_e2m3" + +# Supported element dtypes +# TODO(future PR): add support for MX int8 +SUPPORTED_ELEM_DTYPES = [ + torch.float8_e4m3fn, + torch.float8_e5m2, + DTYPE_FP6_E2M3, + DTYPE_FP6_E3M2, + DTYPE_FP4, +] + +F8E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max # 448.0 +F8E5M2_MAX = torch.finfo(torch.float8_e5m2).max # 57344.0 + +F8E4M3_MAX_POW2 = 8 # 256 +F8E5M2_MAX_POW2 = 15 # 32768 +F6_E2M3_MAX_POW2 = 2 # 4 +F6_E3M2_MAX_POW2 = 4 # 16 +F4_E2M1_MAX_POW2 = 2 # 4 + +E8M0_EXPONENT_BIAS = 127 +E8M0_EXPONENT_NAN_VAL = 255 + +F32_EXP_BIAS = 127 +F6_E2M3_EXP_BIAS = 1 +F6_E3M2_EXP_BIAS = 3 +F4_E2M1_EXP_BIAS = 1 + +F32_MIN_NORMAL = 2 ** (-F32_EXP_BIAS + 1) + +F6_E2M3_MAX = 7.5 +F6_E2M3_MIN_NORMAL = 1.0 +F6_E2M3_MAX_INT = 31 # integer corresponding to 0b00011111 + +F6_E3M2_MAX = 28.0 +F6_E3M2_MIN_NORMAL = 0.25 +F6_E3M2_MAX_INT = 31 # integer corresponding to 0b00011111 + +F4_E2M1_MAX = 6.0 +F4_E2M1_MIN_NORMAL = 1.0 +F4_E2M1_MAX_INT = 7 + +BLOCK_SIZE_DEFAULT = 32 diff --git a/torchao/prototype/mx_formats/custom_cast.py b/torchao/prototype/mx_formats/custom_cast.py new file mode 100644 index 0000000000..60aaa336ba --- /dev/null +++ b/torchao/prototype/mx_formats/custom_cast.py @@ -0,0 +1,713 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import struct + +import numpy as np + +import torch +from torch.utils._triton import has_triton + +from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4 + +# TODO(future): if needed, make the below work on previous PyTorch versions, +# just need to hunt down the previous location of `libdevice`. An assert +# at the callsite prevents usage of this on unsupported versions. +if TORCH_VERSION_AFTER_2_4: + from torch._inductor.runtime.triton_helpers import libdevice + +from torchao.prototype.mx_formats.constants import ( + DTYPE_FP4, + DTYPE_FP6_E2M3, + DTYPE_FP6_E3M2, + E8M0_EXPONENT_BIAS, + E8M0_EXPONENT_NAN_VAL, + F32_EXP_BIAS, + F4_E2M1_EXP_BIAS, + F4_E2M1_MAX, + F4_E2M1_MAX_INT, + F4_E2M1_MIN_NORMAL, + F6_E2M3_EXP_BIAS, + F6_E2M3_MAX, + F6_E2M3_MAX_INT, + F6_E2M3_MIN_NORMAL, + F6_E3M2_EXP_BIAS, + F6_E3M2_MAX, + F6_E3M2_MAX_INT, + F6_E3M2_MIN_NORMAL, +) + + +def get_bits(x: torch.Tensor) -> str: + bits_per_byte = 8 + # Numpy has a nice function to get the string representation of binary. 
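+    # (np.binary_repr; e.g. np.binary_repr(5, width=8) returns '00000101')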
+ # Since we are using ints as views of floats, need to specify the width + # to avoid numpy from using two's complement for negative numbers. + return np.binary_repr( + x.cpu().numpy(), width=x.element_size() * bits_per_byte + ) # noqa: E501 + + +EBITS_F32, MBITS_F32 = 8, 23 +EBITS_F4_E2M1, MBITS_F4_E2M1 = 2, 1 +EBITS_F6_E2M3, MBITS_F6_E2M3 = 2, 3 +EBITS_F6_E3M2, MBITS_F6_E3M2 = 3, 2 + +DENORM_F32TOF4_EXP = ( + # exp bias conversion between formats + (F32_EXP_BIAS - F4_E2M1_EXP_BIAS) + # mantissa length difference between formats + + (MBITS_F32 - MBITS_F4_E2M1) + # add one to encoded exponent for denormalized numbers + + 1 +) +DENORM_F32TOF4_MASK_INT = DENORM_F32TOF4_EXP << MBITS_F32 +# reinterpret int32 as float32 in Python +# see https://stackoverflow.com/a/34446112/1058521 +DENORM_F32TOF4_MASK_FLOAT = struct.unpack( + "!f", struct.pack("!I", DENORM_F32TOF4_MASK_INT) +)[0] + +DENORM_F32TOF6_E2M3_EXP = ( + # exp bias conversion between formats + (F32_EXP_BIAS - F6_E2M3_EXP_BIAS) + # mantissa length difference between formats + + (MBITS_F32 - MBITS_F6_E2M3) + # add one to encoded exponent for denormalized numbers + + 1 +) +DENORM_F32TOF6_E2M3_MASK_INT = DENORM_F32TOF6_E2M3_EXP << MBITS_F32 +# reinterpret int32 as float32 in Python +# see https://stackoverflow.com/a/34446112/1058521 +DENORM_F32TOF6_E2M3_MASK_FLOAT = struct.unpack( + "!f", struct.pack("!I", DENORM_F32TOF6_E2M3_MASK_INT) +)[0] + +DENORM_F32TOF6_E3M2_EXP = ( + # exp bias conversion between formats + (F32_EXP_BIAS - F6_E3M2_EXP_BIAS) + # mantissa length difference between formats + + (MBITS_F32 - MBITS_F6_E3M2) + # add one to encoded exponent for denormalized numbers + + 1 +) +DENORM_F32TOF6_E3M2_MASK_INT = DENORM_F32TOF6_E3M2_EXP << MBITS_F32 +# reinterpret int32 as float32 in Python +# see https://stackoverflow.com/a/34446112/1058521 +DENORM_F32TOF6_E3M2_MASK_FLOAT = struct.unpack( + "!f", struct.pack("!I", DENORM_F32TOF6_E3M2_MASK_INT) +)[0] + +# +# magic value to add during the normal path +# TODO document this better +# + +# c++ code e5m2: +# f_bits += ((uint32_t)(15 - 127) << 23) + 0xFFFFF; +# 0xFFFFF is 1111 1111 1111 1111 1111, 20 ones, 20 = 23 - 3 = 23 - 2 - 1 + +# c++ code e4m3: +# f_bits += ((uint32_t)(7 - 127) << 23) + 0x7FFFF; +# 0x7FFFF is 0111 1111 1111 1111 1111, 19 ones, 19 = 23 - 4 = 23 - 3 - 1 + +MAGIC_ADDER_F4_E2M1 = 0x1FFFFF # 21 ones +MAGIC_ADDER_F6_E2M3 = 0x7FFFF # 19 ones +MAGIC_ADDER_F6_E3M2 = 0xFFFFF # 20 ones + +# c++ code named vars +# f_bits += ((uint32_t)(f8_exp_bias - f32_exp_bias) << f32_mbits) + MAGIC_ADDER; # noqa: E501 + +SIGN_MASK_F4 = 0x8 # 1000 +SIGN_MASK_F6_E2M3 = 0x20 # 100000 +SIGN_MASK_F6_E3M2 = 0x20 # 100000 + +MANTISSA_MASK_F4 = 0x1 # 0001 +MANTISSA_MASK_F6_E2M3 = 0x7 # 000111 +MANTISSA_MASK_F6_E3M2 = 0x3 # 000011 + +ZERO_BITS_F32 = 0x0 +ZERO_POINT_FIVE_BITS_F32 = 0x3F000000 + + +def _f32_to_f4_or_f6_unpacked( + x, + max_normal, + min_normal, + denorm_mask_float, + denorm_mask_int, + ebits, + mbits, + exp_bias, + magic_adder, + max_int, + sign_mask, +): + """ + Input: torch.Tensor of dtype torch.float + Output: torch.Tensor of dtype torch.uint8, + fp4: bits 0-3 empty and bits 4-7 in fp4_e2m1 encoding + fp6: bits 0-1 empty and bits 2-7 in the fp6_e2m3 or fp6_e3m2 encoding + + Note: there is no special values (NaN, inf) support in this code as the + OCP spec does not define special values for fp6 and fp4 dtypes. 
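+    Inputs at or above max_normal therefore saturate to the largest
+    representable value instead of overflowing to inf.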
+
+    Code below is an adaptation of https://fburl.com/code/ciwofcg4 for f4/f6
+
+    Background 1: last answer in https://stackoverflow.com/questions/8981913/how-to-perform-round-to-even-with-floating-point-numbers  # noqa: E501
+    Background 2: Computer Organization and Design, RISC-V edition, Chapter 3.5
+    """
+    assert x.dtype == torch.float
+
+    # save the sign
+    # Note that we have torch.uint32, but some ops like cpu bit shifts
+    # do not work on it. So, we stay in int32.
+    x = x.view(torch.int32)
+    sign = x & 0x80000000
+
+    # set everything to positive, will add sign back at the end
+    x = x ^ sign
+
+    # TODO: can the branch floating point comparisons below be done without
+    # converting to float? probably but need to verify
+    x = x.view(torch.float)
+
+    # rewrite saturate/denorm/norm branches without explicit data dependent
+    # control flow, to be more compiler friendly
+    saturate_mask = x >= max_normal
+    denormal_mask = torch.logical_and(
+        torch.logical_not(saturate_mask), x < min_normal
+    )  # noqa: E501
+    normal_mask = torch.logical_not(
+        torch.logical_or(saturate_mask, denormal_mask)
+    )  # noqa: E501
+
+    #
+    # branch 1: saturate to max val - handled later in the code which combines
+    # the branches
+    #
+
+    #
+    # branch 2: conversion to denormal, as well as rounding up to normal
+    #
+    denormal_x = x + denorm_mask_float
+    denormal_x = denormal_x.view(torch.int32)
+    denormal_x -= denorm_mask_int
+    denormal_x = denormal_x.to(torch.uint8)
+
+    #
+    # branch 3: stay in normal range, adjust the exponent and round
+    #
+    normal_x = x.view(torch.int32)
+    # is the LSB of the resulting mantissa odd? (used for round-to-even)
+    mant_odd = (normal_x >> (MBITS_F32 - mbits)) & 1
+    # update exponent, rounding bias part 1
+    val_to_add = ((exp_bias - F32_EXP_BIAS) << MBITS_F32) + magic_adder
+    normal_x += val_to_add
+    # rounding bias part 2
+    normal_x += mant_odd
+    # take the bits!
+    normal_x = normal_x >> (MBITS_F32 - mbits)
+    normal_x = normal_x.to(torch.uint8)
+
+    #
+    # combine the branches
+    #
+    x = torch.full_like(x, max_int, dtype=torch.uint8)
+    x = torch.where(denormal_mask, denormal_x, x)
+    x = torch.where(normal_mask, normal_x, x)
+
+    # add sign back
+    sign_lp = sign >> (MBITS_F32 + EBITS_F32 - mbits - ebits)
+    sign_lp = sign_lp.to(torch.uint8)
+    # Right shift of a negative signed integer can fill the least significant
+    # bits with either 1s or 0s, depending on the implementation.
Since PyTorch + # doesn't have an uint32 dtype, we mask out these bits to get just the + # f4 sign bit + sign_lp = sign_lp & sign_mask + x = x | sign_lp + + return x.to(torch.uint8) + + +def f32_to_f4_unpacked(x): + """ + Input: torch.Tensor of dtype torch.float + Output: torch.Tensor of dtype torch.uint8, with bits 0-3 empty and + bits 4-7 in fp4_e2m1 + """ + return _f32_to_f4_or_f6_unpacked( + x, + F4_E2M1_MAX, + F4_E2M1_MIN_NORMAL, + DENORM_F32TOF4_MASK_FLOAT, + DENORM_F32TOF4_MASK_INT, + EBITS_F4_E2M1, + MBITS_F4_E2M1, + F4_E2M1_EXP_BIAS, + MAGIC_ADDER_F4_E2M1, + F4_E2M1_MAX_INT, + SIGN_MASK_F4, + ) + + +def f32_to_f6_e2m3_unpacked(x): + """ + Input: torch.Tensor of dtype torch.float + Output: torch.Tensor of dtype torch.uint8, with bits 0-1 empty and + bits 2-7 in fp6_e2m3 + """ + return _f32_to_f4_or_f6_unpacked( + x, + F6_E2M3_MAX, + F6_E2M3_MIN_NORMAL, + DENORM_F32TOF6_E2M3_MASK_FLOAT, + DENORM_F32TOF6_E2M3_MASK_INT, + EBITS_F6_E2M3, + MBITS_F6_E2M3, + F6_E2M3_EXP_BIAS, + MAGIC_ADDER_F6_E2M3, + F6_E2M3_MAX_INT, + SIGN_MASK_F6_E2M3, + ) + + +def f32_to_f6_e3m2_unpacked(x): + """ + Input: torch.Tensor of dtype torch.float + Output: torch.Tensor of dtype torch.uint8, with bits 0-1 empty and + bits 2-7 in fp6_e3m2 + """ + return _f32_to_f4_or_f6_unpacked( + x, + F6_E3M2_MAX, + F6_E3M2_MIN_NORMAL, + DENORM_F32TOF6_E3M2_MASK_FLOAT, + DENORM_F32TOF6_E3M2_MASK_INT, + EBITS_F6_E3M2, + MBITS_F6_E3M2, + F6_E3M2_EXP_BIAS, + MAGIC_ADDER_F6_E3M2, + F6_E3M2_MAX_INT, + SIGN_MASK_F6_E3M2, + ) + + +def _f4_or_f6_unpacked_to_f32(x: torch.Tensor, lp_dtype_name: str): + """ + Input: torch.Tensor of dtype uint8, with bits 0-3 empty and bits 4-7 + containing an fp4_e2m1 encoding + Output: torch.Tensor of dtype fp32 with the dequantized value + + TODO(future): check if LUT for everything is faster than bit shifting, + especially for fp4. + """ + assert x.dtype == torch.uint8 + + if lp_dtype_name == DTYPE_FP4: + sign_mask = SIGN_MASK_F4 + ebits = EBITS_F4_E2M1 + mbits = MBITS_F4_E2M1 + exp_bias = F4_E2M1_EXP_BIAS + mantissa_mask = MANTISSA_MASK_F4 + elif lp_dtype_name == DTYPE_FP6_E2M3: + sign_mask = SIGN_MASK_F6_E2M3 + ebits = EBITS_F6_E2M3 + mbits = MBITS_F6_E2M3 + exp_bias = F6_E2M3_EXP_BIAS + mantissa_mask = MANTISSA_MASK_F6_E2M3 + elif lp_dtype_name == DTYPE_FP6_E3M2: + sign_mask = SIGN_MASK_F6_E3M2 + ebits = EBITS_F6_E3M2 + mbits = MBITS_F6_E3M2 + exp_bias = F6_E3M2_EXP_BIAS + mantissa_mask = MANTISSA_MASK_F6_E3M2 + else: + raise AssertionError(f"unsupported lp_dtype_name {lp_dtype_name}") + + # save the sign + sign_lp = x & sign_mask + + # set everything to positive, will add sign back at the end + x_pos = x ^ sign_lp + + # + # 1. Calculate zero mask + # + zero_mask = x_pos == 0 + + # + # 2. Calculate the denormal path mask + # + denormal_mask = torch.logical_and((x_pos > 0), ((x_pos >> mbits) == 0)) + + # + # 3. Calculate the normal path + # + + # calculate the new exponent and shift it to bits 2:9 of the result + exp_biased_lp = x_pos >> mbits + exp_biased_f32 = exp_biased_lp - exp_bias + F32_EXP_BIAS + exp_biased_f32 = exp_biased_f32.to(torch.int32) << MBITS_F32 + + # shift the mantissa to bits 10:32 of the result + mantissa_lp_int32 = (x_pos & mantissa_mask).to(torch.int32) + mantissa_f32 = mantissa_lp_int32 << (MBITS_F32 - mbits) + result = exp_biased_f32 | mantissa_f32 + + # + # 4. Add the zero and denormal casts to the already casted normal path + # + result[zero_mask] = ZERO_BITS_F32 + # Note: for now the denormal path cast is written for readability and + # numerical correctness. 
There is likely a way to optimize the performance,
+    # I just haven't had time to look into it.
+    if lp_dtype_name == DTYPE_FP4:
+        result[denormal_mask] = ZERO_POINT_FIVE_BITS_F32
+
+    elif lp_dtype_name == DTYPE_FP6_E2M3:
+        # Only 7 possible values, just do a LUT
+        # Note: calculate the booleans first because we are modifying
+        # this variable inplace.
+        is_val1 = mantissa_lp_int32 == 1
+        is_val2 = mantissa_lp_int32 == 2
+        is_val3 = mantissa_lp_int32 == 3
+        is_val4 = mantissa_lp_int32 == 4
+        is_val5 = mantissa_lp_int32 == 5
+        is_val6 = mantissa_lp_int32 == 6
+        is_val7 = mantissa_lp_int32 == 7
+        mantissa_lp_int32[is_val1] = 0x3E000000  # 0.125
+        mantissa_lp_int32[is_val2] = 0x3E800000  # 0.25
+        mantissa_lp_int32[is_val3] = 0x3EC00000  # 0.375
+        mantissa_lp_int32[is_val4] = 0x3F000000  # 0.5
+        mantissa_lp_int32[is_val5] = 0x3F200000  # 0.625
+        mantissa_lp_int32[is_val6] = 0x3F400000  # 0.75
+        mantissa_lp_int32[is_val7] = 0x3F600000  # 0.875
+        result = torch.where(denormal_mask, mantissa_lp_int32, result)
+
+    elif lp_dtype_name == DTYPE_FP6_E3M2:
+        # Only 3 possible values, just do a LUT
+        # Note: calculate the booleans first because we are modifying
+        # this variable inplace.
+        is_val1 = mantissa_lp_int32 == 1
+        is_val2 = mantissa_lp_int32 == 2
+        is_val3 = mantissa_lp_int32 == 3
+        mantissa_lp_int32[is_val1] = 0x3D800000  # 0.0625
+        mantissa_lp_int32[is_val2] = 0x3E000000  # 0.125
+        mantissa_lp_int32[is_val3] = 0x3E400000  # 0.1875
+        result = torch.where(denormal_mask, mantissa_lp_int32, result)
+    else:
+        raise AssertionError(f"unsupported lp_dtype_name {lp_dtype_name}")
+
+    # add sign back
+    sign_f32 = sign_lp.to(torch.int32) << (
+        MBITS_F32 - mbits + EBITS_F32 - ebits
+    )  # noqa: E501
+    result = result | sign_f32
+
+    return result.view(torch.float)
+
+
+def f4_unpacked_to_f32(x: torch.Tensor):
+    """
+    Input: torch.Tensor of dtype uint8, with bits 0-3 empty and bits 4-7
+    containing an fp4_e2m1 encoding
+    Output: torch.Tensor of dtype fp32 with the dequantized value
+    """
+    return _f4_or_f6_unpacked_to_f32(x, DTYPE_FP4)
+
+
+def f6_e2m3_unpacked_to_f32(x: torch.Tensor):
+    """
+    Input: torch.Tensor of dtype uint8, with bits 0-1 empty and bits 2-7
+    containing an fp6_e2m3 encoding
+    Output: torch.Tensor of dtype fp32 with the dequantized value
+    """
+    return _f4_or_f6_unpacked_to_f32(x, DTYPE_FP6_E2M3)
+
+
+def f6_e3m2_unpacked_to_f32(x: torch.Tensor):
+    """
+    Input: torch.Tensor of dtype uint8, with bits 0-1 empty and bits 2-7
+    containing an fp6_e3m2 encoding
+    Output: torch.Tensor of dtype fp32 with the dequantized value
+    """
+    return _f4_or_f6_unpacked_to_f32(x, DTYPE_FP6_E3M2)
+
+
+if has_triton():
+    import triton
+    import triton.language as tl
+
+    @triton.jit
+    def _fp4_packed_to_bf16(x_packed):
+        """
+        Input: a tensor of packed fp4 values
+        Output: a tensor of bfloat16 values
+        """
+
+        # `x_low_bits` is the first element of each packed pair (stored in
+        # bits 4:7) and `x_high_bits` is the second (bits 0:3), matching
+        # `pack_uint4`
+        x_low_bits = x_packed >> 4
+        x_high_bits = x_packed & 0xF
+        x = tl.interleave(x_low_bits, x_high_bits)
+
+        # cast logic below
+        # output = x_unpacked.to(tl.float32)
+
+        # save the sign
+        sign_f4 = x & SIGN_MASK_F4
+
+        # set everything to positive, will add sign back at the end
+        x_pos = x ^ sign_f4
+
+        # Special case zero
+        zero_mask = x_pos == 0
+
+        # There is only one denormal value in fp4: s001, which is 0.5 in f32
+        # Special case it.
+        # TODO(later): will it be faster to repeat this for all 8 positive
+        # values instead of the bit manipulations?
+ denormal_mask = x_pos == 1 + + # calculate the new exponent and shift it to bits 2:9 of the result + exp_biased_f4 = x_pos >> MBITS_F4_E2M1 + exp_biased_f32 = exp_biased_f4 - F4_E2M1_EXP_BIAS + F32_EXP_BIAS + exp_biased_f32 = exp_biased_f32.to(tl.int32) << MBITS_F32 + + # shift the mantissa to bits 10:32 of the result + mantissa_f4 = x_pos & MANTISSA_MASK_F4 + mantissa_f32 = mantissa_f4.to(tl.int32) << (MBITS_F32 - MBITS_F4_E2M1) + output = mantissa_f32 + + # combine the pieces + result = exp_biased_f32 | mantissa_f32 + # result[zero_mask] = ZERO_BITS_F32 + result = tl.where(zero_mask, ZERO_BITS_F32, result) + # result[denormal_mask] = ZERO_POINT_FIVE_BITS_F32 + result = tl.where(denormal_mask, ZERO_POINT_FIVE_BITS_F32, result) + + # add sign back + sign_f32 = sign_f4.to(tl.int32) << ( + MBITS_F32 - MBITS_F4_E2M1 + EBITS_F32 - EBITS_F4_E2M1 + ) + result = result | sign_f32 + + # The bit shifting above is for float32, so for now we + # bitcast to float32 and then regular cast to bfloat16 + # TODO(later): it should be pretty easy to cast directly to bf16, just + # need to adjust the mbits/ebits/special values. Perf impact is likely + # to be small as we would not be chaning memory access patterns. + output = result.to(tl.float32, bitcast=True) + output = output.to(tl.bfloat16) + return output + + @triton.jit + def triton_f4_to_bf16_kernel( + x_ptr, + output_ptr, + n_elements_in, + BLOCK_SIZE_IN: tl.constexpr, + ): + pid = tl.program_id(axis=0) + n_elements_out = n_elements_in * 2 + BLOCK_SIZE_OUT: tl.constexpr = BLOCK_SIZE_IN * 2 + + block_start_in = pid * BLOCK_SIZE_IN + offsets_in = block_start_in + tl.arange(0, BLOCK_SIZE_IN) + + mask_in = offsets_in < n_elements_in + + # packed uint8 + x_packed = tl.load(x_ptr + offsets_in, mask=mask_in) + output = _fp4_packed_to_bf16(x_packed) + + # set up output offsets + block_start_out = pid * BLOCK_SIZE_OUT + offsets_out = block_start_out + tl.arange(0, BLOCK_SIZE_OUT) + mask_out = offsets_out < n_elements_out + + tl.store(output_ptr + offsets_out, output, mask=mask_out) + + @triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE_IN": 128}), + triton.Config({"BLOCK_SIZE_IN": 256}), + triton.Config({"BLOCK_SIZE_IN": 512}), + triton.Config({"BLOCK_SIZE_IN": 1024}), + triton.Config({"BLOCK_SIZE_IN": 2048}), + ], + key=["n_elements_in"], + ) + @triton.jit + def triton_f4_to_scaled_bf16_kernel( + x_ptr, + s_ptr, + output_ptr, + n_elements_in, + mx_block_size: tl.constexpr, + BLOCK_SIZE_IN: tl.constexpr, + ): + pid = tl.program_id(axis=0) + n_elements_out = n_elements_in * 2 + n_elements_s = n_elements_out // 32 + + BLOCK_SIZE_S: tl.constexpr = BLOCK_SIZE_IN // 16 + BLOCK_SIZE_OUT: tl.constexpr = BLOCK_SIZE_IN * 2 + + block_start_in = pid * BLOCK_SIZE_IN + offsets_in = block_start_in + tl.arange(0, BLOCK_SIZE_IN) + mask_in = offsets_in < n_elements_in + # packed uint8 + x_packed = tl.load(x_ptr + offsets_in, mask=mask_in) + output = _fp4_packed_to_bf16(x_packed) + + # load scale + block_start_s = pid * BLOCK_SIZE_S + offsets_s = block_start_s + tl.arange(0, BLOCK_SIZE_S) + mask_s = offsets_s < n_elements_s + s = tl.load(s_ptr + offsets_s, mask=mask_s) + + # create the scale in bf16 + s_offset = s.to(tl.int16) - E8M0_EXPONENT_BIAS + s_fp = libdevice.pow(2.0, s_offset).to(tl.bfloat16) + s_fp = tl.where(s != E8M0_EXPONENT_NAN_VAL, s_fp, float("nan")) + + # multiply output by scale + # TODO(later): see if manipulating the exponent instead of fp + # multiplication is going to give a significant speedup + output = tl.reshape( + output, (BLOCK_SIZE_OUT // 
mx_block_size, mx_block_size) + ) # noqa: E501 + s_fp = tl.reshape(s_fp, (BLOCK_SIZE_S // 1, 1)) + output = output * s_fp + output = tl.reshape(output, (BLOCK_SIZE_OUT,)) + + # set up output offsets + block_start_out = pid * BLOCK_SIZE_OUT + offsets_out = block_start_out + tl.arange(0, BLOCK_SIZE_OUT) + mask_out = offsets_out < n_elements_out + + tl.store(output_ptr + offsets_out, output, mask=mask_out) + +else: + + def triton_f4_to_bf16_kernel( + x_ptr, + output_ptr, + n_elements_in, + BLOCK_SIZE_IN, + ): + raise AssertionError("unsupported without triton") + + def triton_f4_to_scaled_bf16_kernel( + x_ptr, + s_ptr, + output_ptr, + n_elements_in, + mx_block_size, + BLOCK_SIZE_IN, + ): + raise AssertionError("unsupported without triton") + + +def triton_f4_to_bf16(x: torch.Tensor): + """ + Input: a tensor of packed fp4 values + Output: a tensor of bfloat16 values + + Note: this function is only used in testing, so we can test + the numerical correctness of the cast without the scaling. + """ + new_shape = (*x.shape[:-1], x.shape[-1] * 2) + output = torch.empty(*new_shape, device=x.device, dtype=torch.bfloat16) + assert x.is_contiguous() + assert x.is_cuda and output.is_cuda + n_elements_in = x.numel() + grid = lambda meta: ( # noqa: E731 + triton.cdiv(n_elements_in, meta["BLOCK_SIZE_IN"]), + ) # noqa: E731,E501 + triton_f4_to_bf16_kernel[grid](x, output, n_elements_in, BLOCK_SIZE_IN=512) + return output + + +def triton_f4_to_scaled_bf16( + x: torch.Tensor, + s_e8m0: torch.Tensor, + mx_block_size: int, +): + """ + Input: a tensor of packed fp4 values, and a scale in e8m0 format. The block + size is currently assumed to be 32. + Output: a tensor of bfloat16 values, multiplied by the encoded scale + """ + assert TORCH_VERSION_AFTER_2_4, "unsupported" + new_shape = (*x.shape[:-1], x.shape[-1] * 2) + output = torch.empty(*new_shape, device=x.device, dtype=torch.bfloat16) + assert x.is_contiguous() + assert x.is_cuda and output.is_cuda + n_elements_in = x.numel() + grid = lambda meta: ( # noqa: E731 + triton.cdiv(n_elements_in, meta["BLOCK_SIZE_IN"]), + ) + triton_f4_to_scaled_bf16_kernel[grid]( + x, s_e8m0, output, n_elements_in, mx_block_size + ) + return output + + +# pack/unpack code copy-pasted from +# https://github.com/pytorch-labs/ao/blob/main/torchao/dtypes/uint4.py + + +def down_size(size): + assert size[-1] % 2 == 0, f"{size} last dim not divisible by two" + return (*size[:-1], size[-1] // 2) + + +def up_size(size): + return (*size[:-1], size[-1] * 2) + + +def unpack_uint4(uint8_data) -> torch.Tensor: + """Get the original weight from the normalized float weight format""" + assert uint8_data.is_contiguous() + + shape = uint8_data.shape + + # since we are using uint8 we will decode 2 entries per byte + # Shift elements down 4 and select out the bottom 4 bits + # + # Note: known slow with triton + # * currently generates two kernels with a cat in between + # * after https://github.com/pytorch/pytorch/pull/123278 lands I + # verified that we get a single triton kernel, but that is even slower + # than the two kernels before this PR + # * TODO add a microbenchmark of just the cast and profile this + first_elements = (uint8_data >> 4).to(torch.uint8) + second_elements = (uint8_data & 0b1111).to(torch.uint8) + unpacked = torch.stack([first_elements, second_elements], dim=-1).view( + up_size(shape) + ) + + # trying Bert Maher's suggestion + # 2024-04-04: this works in unit tests but is broken on LLaMa 7B FFN with + # ptxas /tmp/tmp84wp7lea.ptx, line 227; error : Unexpected instruction types 
specified for 'sub' # noqa: E501 + # which seems to be the same issue as https://github.com/pytorch/pytorch/issues/118589 # noqa: E501 + # TODO(later): try removing subtractions from our cast to see if we can work around # noqa: E501 + # shift_tensor = torch.tensor([4, 0], dtype=torch.uint8, device=uint8_data.device) # noqa: E501 + # unpacked = (uint8_data.reshape(-1)[::, None] >> shift_tensor) & 0b1111 + # unpacked = unpacked.view(up_size(shape)) + + return unpacked + + +def pack_uint4(uint8_data) -> torch.Tensor: + # converting to uint8 for operations + shape = uint8_data.shape + assert shape[-1] % 2 == 0 + uint8_data = uint8_data.contiguous().view(-1) + return (uint8_data[::2] << 4 | uint8_data[1::2]).view(down_size(shape)) diff --git a/torchao/prototype/mx_formats/fp_format_spec.py b/torchao/prototype/mx_formats/fp_format_spec.py new file mode 100644 index 0000000000..2dc518add0 --- /dev/null +++ b/torchao/prototype/mx_formats/fp_format_spec.py @@ -0,0 +1,550 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +A helper script to summarize the key numerical values of various floating +point formats relevant to the MX spec. +""" + +import math +from typing import Tuple + +import tabulate + +import torch + +from torchao.prototype.mx_formats.constants import ( + DTYPE_FP4, + DTYPE_FP6_E2M3, + DTYPE_FP6_E3M2, +) + +from torchao.prototype.mx_formats.custom_cast import get_bits + +dtype_to_bitwidth = { + torch.float: 32, + torch.bfloat16: 16, + torch.float16: 16, + torch.float8_e4m3fn: 8, + torch.float8_e5m2: 8, + DTYPE_FP6_E3M2: 6, + DTYPE_FP6_E2M3: 6, +} +dtype_to_sem_len = { + torch.float: (1, 8, 23), + torch.bfloat16: (1, 8, 7), + torch.float16: (1, 5, 10), + torch.float8_e4m3fn: (1, 4, 3), + torch.float8_e5m2: (1, 5, 2), + # the line below is currently representing fp4 with bits 0:3 empty and + # bits 4:7 containing the fp4 encoding + # TODO(future): clean this up + torch.uint8: (1, 2, 1), +} +# bias = 2 ** (exp_bitwidth - 1) - 1 +dtype_to_exp_bias = { + torch.float: 127, + torch.bfloat16: 127, + torch.float16: 15, + torch.float8_e4m3fn: 7, + torch.float8_e5m2: 15, + DTYPE_FP6_E2M3: 1, + DTYPE_FP6_E3M2: 3, +} +dtype_to_int_dtype = { + torch.float: torch.int32, + torch.float16: torch.int16, + torch.bfloat16: torch.int16, + torch.float8_e4m3fn: torch.int8, + torch.float8_e5m2: torch.int8, + # for fp4 + # TODO(future): clean it up + torch.uint8: torch.uint8, +} + +# format: +# { +# dtype: [ +# [ +# ref_f32_value, sign_encoding, exp_encoding, mantissa_encoding, +# description, +# ], +# ..., +# ], +# ..., +# } +dtype_to_interesting_values = { + torch.float: [ + # zero and neg zero + (0.0, "0", "0" * 8, "0" * 23, "zero"), + (-0.0, "1", "0" * 8, "0" * 23, "zero_neg"), + # special values + (float("nan"), "0", "1" * 8, "1" + "0" * 22, "nan"), + (float("inf"), "0", "1" * 8, "0" * 23, "inf"), + (float("-inf"), "1", "1" * 8, "0" * 23, "inf_neg"), + # values below verified with from https://www.h-schmidt.net/FloatConverter/IEEE754.html # noqa: E501 + # largest normal + ( + 3.402823466385288598117042e38, + "0", + "1" * 7 + "0", + "1" * 23, + "largest_norm", + ), # noqa: E501 + ( + -3.402823466385288598117042e38, + "1", + "1" * 7 + "0", + "1" * 23, + "largest_norm_neg", + ), + # smallest normal + ( + 1.175494350822287507968737e-38, + "0", + "0" * 7 + "1", + "0" * 23, + "smallest_norm", + ), # noqa: E501 + ( + -1.175494350822287507968737e-38, + 
"1", + "0" * 7 + "1", + "0" * 23, + "smallest_norm_neg", + ), + # largest denormal + ( + 1.175494210692441075487029e-38, + "0", + "0" * 8, + "1" * 23, + "largest_denorm", + ), # noqa: E501 + ( + -1.175494210692441075487029e-38, + "1", + "0" * 8, + "1" * 23, + "largest_denorm_neg", + ), # noqa: E501 + # smallest denormal + ( + 1.401298464324817070923730e-45, + "0", + "0" * 8, + "0" * 22 + "1", + "smallest_denorm", + ), + ( + -1.401298464324817070923730e-45, + "1", + "0" * 8, + "0" * 22 + "1", + "smallest_denorm_neg", + ), + # positive and negative value + (30.0, "0", "10000011", "1" * 3 + "0" * 20, "random_pos"), + (-24.0, "1", "10000011", "1" + "0" * 22, "random_neg"), + ], + torch.bfloat16: [ + # zero and neg zero + (0.0, "0", "0" * 8, "0" * 7, "zero"), + (-0.0, "1", "0" * 8, "0" * 7, "zero_neg"), + # special values + (float("nan"), "0", "1" * 8, "1" + "0" * 6, "nan"), + (float("inf"), "0", "1" * 8, "0" * 7, "inf"), + (float("-inf"), "1", "1" * 8, "0" * 7, "inf_neg"), + # values below checked with TODO + # largest normal + (3.38953e38, "0", "1" * 7 + "0", "1" * 7, "largest_norm"), + (-3.38953e38, "1", "1" * 7 + "0", "1" * 7, "largest_norm_neg"), + # smallest normal + (1.17549e-38, "0", "0" * 7 + "1", "0" * 7, "smallest_norm"), + (-1.17549e-38, "1", "0" * 7 + "1", "0" * 7, "smallest_norm_neg"), + # largest denormal + (1.16631e-38, "0", "0" * 8, "1" * 7, "largest_denorm"), + (-1.16631e-38, "1", "0" * 8, "1" * 7, "largest_denorm_neg"), + # smallest denormal + (9.18355e-41, "0", "0" * 8, "0" * 6 + "1", "smallest_denorm"), + (-9.18355e-41, "1", "0" * 8, "0" * 6 + "1", "smallest_denorm_neg"), + # positive and negative value + (30.0, "0", "10000011", "1" * 3 + "0" * 4, "random_pos"), + (-24.0, "1", "10000011", "1" + "0" * 6, "random_neg"), + ], + torch.float16: [ + # zero and neg zero + (0.0, "0", "0" * 5, "0" * 10, "zero"), + (-0.0, "1", "0" * 5, "0" * 10, "zero_neg"), + # special values + (float("nan"), "0", "1" * 5, "1" + "0" * 9, "nan"), + (float("inf"), "0", "1" * 5, "0" * 10, "inf"), + (float("-inf"), "1", "1" * 5, "0" * 10, "inf_neg"), + # values below checked with https://en.wikipedia.org/wiki/Half-precision_floating-point_format # noqa: E501 + # largest normal + (65504, "0", "1" * 4 + "0", "1" * 10, "largest_normal"), + (-65504, "1", "1" * 4 + "0", "1" * 10, "largest_normal_neg"), + # smallest normal + (0.00006103515625, "0", "0" * 4 + "1", "0" * 10, "smallest_normal"), + ( + -0.00006103515625, + "1", + "0" * 4 + "1", + "0" * 10, + "smallest_normal_neg", + ), # noqa: E501 + # largest denormal + (0.000060975552, "0", "0" * 5, "1" * 10, "largest_denorm"), + (-0.000060975552, "1", "0" * 5, "1" * 10, "largest_denorm_neg"), + # smallest denormal + (0.000000059604645, "0", "0" * 5, "0" * 9 + "1", "smallest_denorm"), + ( + -0.000000059604645, + "1", + "0" * 5, + "0" * 9 + "1", + "smallest_denorm_neg", + ), # noqa: E501 + # positive and negative value + (30.0, "0", "10011", "1" * 3 + "0" * 7, "random_pos"), + (-24.0, "1", "10011", "1" + "0" * 9, "random_neg"), + ], + torch.float8_e4m3fn: [ + # zero and neg zero + (0.0, "0", "0000", "000", "zero"), + (-0.0, "1", "0000", "000", "zero_neg"), + # special values + # note: no pos or neg inf + (float("nan"), "0", "1111", "111", "nan"), + # values below checked with https://arxiv.org/pdf/2209.05433.pdf, Table 1 # noqa: E501 + # largest normal + (448.0, "0", "1111", "110", "largest_normal"), + (-448.0, "1", "1111", "110", "largest_normal_neg"), + # smallest normal + (2**-6, "0", "0001", "000", "smallest_normal"), + (-(2**-6), "1", "0001", "000", 
"smallest_normal_neg"), + # largest denormal + (0.875 * 2**-6, "0", "0000", "111", "largest_denormal"), + (-0.875 * 2**-6, "1", "0000", "111", "largest_denormal_neg"), + # smallest denormal + (2**-9, "0", "0000", "001", "smallest_denormal"), + (-(2**-9), "1", "0000", "001", "smallest_denormal_neg"), + # positive and negative value + (30.0, "0", "1011", "111", "random_pos"), + (-24.0, "1", "1011", "100", "random_neg"), + ], + torch.float8_e5m2: [ + # zero and neg zero + (0.0, "0", "00000", "00", "zero"), + (-0.0, "1", "00000", "00", "zero_neg"), + # special values + (float("nan"), "0", "11111", "11", "nan"), + (float("inf"), "0", "11111", "00", "inf"), + (float("-inf"), "1", "11111", "00", "inf_neg"), + # values below checked with https://arxiv.org/pdf/2209.05433.pdf, Table 1 # noqa: E501 + # largest normal + (57344.0, "0", "11110", "11", "largest_normal"), + (-57344.0, "1", "11110", "11", "largest_normal_neg"), + # smallest normal + (2**-14, "0", "00001", "00", "smallest_normal"), + (-(2**-14), "1", "00001", "00", "smallest_normal_neg"), + # largest denormal + (0.75 * 2**-14, "0", "00000", "11", "largest_denormal"), + (-0.75 * 2**-14, "1", "00000", "11", "largest_denormal_neg"), + # smallest denormal + (2**-16, "0", "00000", "01", "smallest_denormal"), + (-(2**-16), "1", "00000", "01", "smallest_denormal_neg"), + # positive and negative value + (32.0, "0", "10100", "00", "random_pos"), + (-24.0, "1", "10011", "10", "random_neg"), + ], +} + +# values for fp4_e2m1, as defined in the OCP spec for MXFP4 +# other than the sign, there are only 8 values, so just create +# the table by hand +# formula norm: sign * (2 ** (exp - 1)) * 1.x +# formula denorm: sign * (2 ** (exp - 1 + 1)) * 0.x +# format: val, formula, s, e, m, val, note +float4_e2m1_interesting_values = [ + (0, "1.0 * 2^0 * 0.0", "0", "00", "0", "zero"), + # same as largest denormal, there is only one + ( + 0.5, + "1.0 * 2^0 * 0.5", + "0", + "00", + "1", + "smallest_denormal", + ), # 2**0 * 0.5 # noqa: E501 + (1.0, "1.0 * 2^0 * 1.0", "0", "01", "0", "smallest_normal"), # 2**0 * 1.0 + (1.5, "1.0 * 2^0 * 1.5", "0", "01", "1", "val3"), # 2**0 * 1.5 + (2.0, "1.0 * 2^1 * 1.0", "0", "10", "0", "val4"), # 2**1 * 1.0 + (3.0, "1.0 * 2^1 * 1.5", "0", "10", "1", "val5"), # 2**1 * 1.5 + (4.0, "1.0 * 2^2 * 1.0", "0", "11", "0", "val6"), # 2**2 * 1.0 + (6.0, "1.0 * 2^2 * 1.5", "0", "11", "1", "largest_normal"), # 2**2 * 1.5 +] +float4_e2m1_neg = [] +for fp32_ref, formula, _s, e, m, label in float4_e2m1_interesting_values: + float4_e2m1_neg.append( + [-1 * fp32_ref, "-" + formula, "1", e, m, label + "_neg"] + ) # noqa: E501 +float4_e2m1_interesting_values.extend(float4_e2m1_neg) +del float4_e2m1_neg + +# https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf, section 5.3.2 # noqa: E501 +float6_e3m2_interesting_values = [ + (0, "1.0 * 2^-2 * 0.0", "0", "000", "00", "zero"), + (0.0625, "1.0 * 2^-2 * 0.25", "0", "000", "01", "smallest_denormal"), + (0.1875, "1.0 * 2^-2 * 0.75", "0", "000", "11", "largest_denormal"), + (0.25, "1.0 * 2^-2 * 1.0", "0", "001", "00", "smallest_normal"), + (28.0, "1.0 * 2^4 * 1.75", "0", "111", "11", "largest_normal"), +] +float6_e3m2_neg = [] +for fp32_ref, formula, _s, e, m, label in float6_e3m2_interesting_values: + float6_e3m2_neg.append( + [-1 * fp32_ref, "-" + formula, "1", e, m, label + "_neg"] + ) # noqa: E501 +float6_e3m2_interesting_values.extend(float6_e3m2_neg) +del float6_e3m2_neg + +# https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf, section 
5.3.2 # noqa: E501 +float6_e2m3_interesting_values = [ + (0, "1.0 * 2^0 * 0.0", "0", "00", "000", "zero"), + (0.125, "1.0 * 2^0 * 0.125", "0", "00", "001", "smallest_denormal"), + (0.875, "1.0 * 2^0 * 0.875", "0", "00", "111", "largest_denormal"), + (1.0, "1.0 * 2^0 * 1.0", "0", "01", "000", "smallest_normal"), + (7.5, "1.0 * 2^2 * 1.875", "0", "11", "111", "largest_normal"), +] +float6_e2m3_neg = [] +for fp32_ref, formula, _s, e, m, label in float6_e2m3_interesting_values: + float6_e2m3_neg.append( + [ + -1 * fp32_ref, + "-" + formula, + "1", + e, + m, + label + "_neg", + ] + ) +float6_e2m3_interesting_values.extend(float6_e2m3_neg) +del float6_e2m3_neg + + +def _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype): + # test going from float to encoding + x = torch.tensor(fp_ref, dtype=dtype) + bitwidth = dtype_to_bitwidth[dtype] + s_enc, e_enc, m_enc = get_sem_bits(x, bitwidth=bitwidth) + assert s_enc_ref == s_enc + assert e_enc_ref == e_enc, f"{e_enc_ref} != {e_enc}" + assert m_enc_ref == m_enc, f"{m_enc_ref} != {m_enc}" + + # test going from encoding to float + s_i, e_i, m_f, special_value = sem_bits_to_sem_vals( + s_enc, + e_enc, + m_enc, + dtype, + ) + fp = sem_vals_to_f32(s_i, e_i, m_f, special_value) + assert_same(fp_ref, fp) + + +def get_sem_bits(x: torch.Tensor, bitwidth: int) -> Tuple[str, str, str]: + """ + Input: a tensor with a single element of the target element dtype + - for PT core dtypes, that dtype (fp32, fp16, fp8_e4m3, etc) + - for fp4_e2m1, fp6_e3m2, fp6_e2m3, not supported in this function + Output: bit strings for sign, exponent, mantissa encodings of the input + """ + assert x.numel() == 1 + s_len, e_len, m_len = dtype_to_sem_len[x.dtype] + + new_dtype = dtype_to_int_dtype[x.dtype] + x = x.view(new_dtype) + np_res = get_bits(x) + if bitwidth == 4: + # TODO(future): clean up this fp4 codepath + offset = 4 + s, e, m = ( + np_res[offset], + np_res[offset + s_len : (offset + s_len + e_len)], # noqa: E203 + np_res[(offset + s_len + e_len) :], # noqa: E203 + ) + else: + s, e, m = ( + np_res[0], + np_res[s_len : (s_len + e_len)], # noqa: E203 + np_res[(s_len + e_len) :], # noqa: E203 + ) + assert len(s) == s_len + assert len(e) == e_len + assert len(m) == m_len + return s, e, m + + +def exp_encoding_to_exp(exp_bit_str: str, dtype): + """ + Input: bit string of exponent for dtype + Output: integer representation of exponent + """ + exp_biased = int(exp_bit_str, 2) + exp_bias = dtype_to_exp_bias[dtype] + exp_unbiased = exp_biased - exp_bias + + # for denormalized values, increment exponent back + # up by one + if all(b == "0" for b in exp_bit_str): + exp_unbiased += 1 + + return exp_unbiased + + +def sem_bits_to_sem_vals(s_enc, e_enc, m_enc, dtype): + """ + Input: encodings of sign, exponent, mantissa for dtype + Output: integer sign, integer exponent, float32 mantissa, special value + + Supported dtypes: PT core dtypes and fp6_e3m2 and fp6_e2m3 + Not supported dtypes: fp4 + + If special value is filled out, sem are none + If sem are filled out, special value is none + """ + sign = 1 if s_enc == "0" else -1 + + # handle special values + if all(bit == "1" for bit in e_enc): + dtypes = ( + torch.float32, + torch.bfloat16, + torch.float16, + torch.float8_e5m2, + ) + if dtype in dtypes: + if all(bit == "0" for bit in m_enc): + if s_enc == "0": + return None, None, None, float("inf") + else: + return None, None, None, float("-inf") + else: + return None, None, None, float("nan") + elif dtype in (DTYPE_FP6_E2M3, DTYPE_FP6_E3M2): + # no special values in f6 dtypes + pass 
+        else:
+            assert dtype is torch.float8_e4m3fn
+            # 1. float8_e4m3fn does not have infinity
+            # 2. float8_e4m3fn only sets {s}.{1111}.{111} for nan
+            if all(b == "1" for b in e_enc + m_enc):
+                return None, None, None, float("nan")
+
+    exponent = exp_encoding_to_exp(e_enc, dtype)
+
+    is_zero = all(b == "0" for b in e_enc + m_enc)
+    is_denormal = (not is_zero) and all(b == "0" for b in e_enc)
+    is_normal = not is_zero and not is_denormal
+
+    if is_zero:
+        return sign, exponent, 0.0, None
+
+    mantissa = 1.0 if is_normal else 0.0
+    cur_pow_2 = -1
+    for m_bit in m_enc:
+        mantissa += int(m_bit) * pow(2, cur_pow_2)
+        cur_pow_2 -= 1
+    return sign, exponent, mantissa, None
+
+
+def sem_vals_to_f32(s_i, e_i, m_f, special_value):
+    """
+    Input: integer sign, integer exponent, float32 mantissa, special value
+    Output: float32 value
+    """
+    if special_value is not None:
+        return special_value
+    f = s_i * pow(2, e_i) * m_f
+    return f
+
+
+def sem_vals_to_formula(s_i, e_i, m_f, special_value):
+    """
+    Input: integer sign, integer exponent, float32 mantissa, special value
+    Output: formula to get the float32 value
+    """
+    if special_value is not None:
+        return special_value
+    return f"{s_i} * 2^{e_i} * {m_f}"
+
+
+def assert_same(fp1, fp2):
+    if math.isnan(fp1):
+        assert math.isnan(fp2)
+    elif math.isinf(fp1):
+        if fp1 > 0:
+            assert math.isinf(fp2) and fp2 > 0
+        else:
+            assert math.isinf(fp2) and fp2 < 0
+    else:
+        # relative error check; `abs` in the denominator keeps the check
+        # meaningful for negative reference values
+        assert abs(fp2 - fp1) / (abs(fp1) + 1e-20) < 1e-12, f"{fp2} != {fp1}"
+
+
+def run(dtype):
+    print("dtype", dtype)
+
+    headers = ["orig_val", "formula", "s_enc", "e_enc", "m_enc", "note"]
+    results = []
+
+    if dtype == DTYPE_FP4:
+        results = float4_e2m1_interesting_values
+    elif dtype == DTYPE_FP6_E3M2:
+        results = float6_e3m2_interesting_values
+    elif dtype == DTYPE_FP6_E2M3:
+        results = float6_e2m3_interesting_values
+    else:
+        interesting_values = dtype_to_interesting_values[dtype]
+        for row in interesting_values:
+            fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, notes = row
+
+            # test that things still work
+            _assert_equals(fp_ref, s_enc_ref, e_enc_ref, m_enc_ref, dtype)
+
+            # create the formula
+            s_i, e_i, m_f, special_value = sem_bits_to_sem_vals(
+                s_enc_ref, e_enc_ref, m_enc_ref, dtype
+            )
+            formula = sem_vals_to_formula(s_i, e_i, m_f, special_value)
+
+            # create the table row
+            results.append(
+                [
+                    fp_ref,
+                    formula,
+                    s_enc_ref,
+                    e_enc_ref,
+                    m_enc_ref,
+                    notes,
+                ]
+            )
+
+    print(tabulate.tabulate(results, headers=headers))
+    print("\n")
+
+
+if __name__ == "__main__":
+    for dtype in (
+        torch.float,
+        torch.bfloat16,
+        torch.float16,
+        torch.float8_e4m3fn,
+        torch.float8_e5m2,
+        DTYPE_FP6_E3M2,
+        DTYPE_FP6_E2M3,
+        DTYPE_FP4,
+    ):
+        run(dtype)
diff --git a/torchao/prototype/mx_formats/mx_linear.py b/torchao/prototype/mx_formats/mx_linear.py
new file mode 100644
index 0000000000..c429eb57d4
--- /dev/null
+++ b/torchao/prototype/mx_formats/mx_linear.py
@@ -0,0 +1,160 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Defines the UX for converting a model to use mx weights
+
+For now, this is a module swap for speed of iteration.
+
+Eventually we plan to move this to a tensor subclass weight wrapper for
+inference, and to a tensor subclass weight wrapper + module hooks for training.
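+
+A rough usage sketch for training (`swap_linear_with_mx_linear` is defined at
+the bottom of this file; the toy model shape, element dtype and block size
+below are illustrative choices, not requirements):
+
+    import torch
+
+    m = torch.nn.Sequential(torch.nn.Linear(32, 32))
+    swap_linear_with_mx_linear(m, torch.float8_e4m3fn, block_size=32)
+    y = m(torch.randn(4, 32))
+    y.sum().backward()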
+"""
+
+import torch
+import torch.nn.functional as F
+
+from torchao.prototype.mx_formats.mx_tensor import MXTensor, to_mx
+
+
+@torch._dynamo.allow_in_graph
+class NoopFwToMXBw(torch.autograd.Function):
+    """
+    Forward: no-op
+    Backward: cast grad to MX
+    """
+
+    @staticmethod
+    def forward(ctx, x, elem_dtype, block_size):
+        ctx.elem_dtype = elem_dtype
+        ctx.block_size = block_size
+        return x
+
+    @staticmethod
+    def backward(ctx, g):
+        scale, data = to_mx(g, ctx.elem_dtype, ctx.block_size)
+        return (
+            MXTensor(scale, data, ctx.elem_dtype, ctx.block_size, g.dtype),
+            None,
+            None,
+        )
+
+
+class MXLinear(torch.nn.Linear):
+    """
+    Linear layer with the compute happening in emulated MX. Currently the MX
+    matmul is emulated since there is no hardware support yet. Activations,
+    weights and grads are cast to MX and back to high precision for each
+    matmul.
+    """
+
+    @classmethod
+    @torch.no_grad()
+    def from_float(cls, mod, elem_dtype, block_size):
+        mod.__class__ = MXLinear
+        mod.elem_dtype = elem_dtype
+        mod.block_size = block_size
+        return mod
+
+    def forward(self, x):
+        x_mx = MXTensor.to_mx(x, self.elem_dtype, self.block_size)
+        w_mx = MXTensor.to_mx(self.weight, self.elem_dtype, self.block_size)
+        y = F.linear(x_mx, w_mx, self.bias)
+        y = NoopFwToMXBw.apply(y, self.elem_dtype, self.block_size)
+        return y
+
+
+class MXInferenceLinear(torch.nn.Linear):
+    """
+    Inference version of MXLinear, with the weight pre-quantized to MX.
+    """
+
+    @classmethod
+    @torch.no_grad()
+    def from_float(cls, mod, elem_dtype, block_size):
+        with torch.device("meta"):
+            super_kwargs = {
+                "in_features": mod.in_features,
+                "out_features": mod.out_features,
+                "bias": False,
+            }
+            new_mod = cls(**super_kwargs)
+        # TODO(future PR): set to new_mod.weight directly, will need to work
+        # through some errors
+        new_mod.weight_mx = MXTensor.to_mx(
+            mod.weight.t().contiguous(), elem_dtype, block_size=block_size
+        ).t()
+        new_mod.bias = mod.bias
+        new_mod.elem_dtype = elem_dtype
+        return new_mod
+
+    @torch.no_grad()
+    def forward(self, x):
+        w_hp = self.weight_mx.to_dtype(x.dtype)
+        y = F.linear(x, w_hp, self.bias)
+        return y
+
+
+def replace_with_custom_fn_if_matches_filter(
+    model, replacement_fn, filter_fn, cur_fqn=""
+) -> None:
+    """
+    For each `child` in `model`, replaces it with `replacement_fn(child)`
+    if `filter_fn(child, fqn)` is `True`
+    """
+    name_to_child = dict(model.named_children())
+    for name, child in name_to_child.items():
+        if cur_fqn == "":
+            new_fqn = name
+        else:
+            new_fqn = f"{cur_fqn}.{name}"
+        if filter_fn(child, new_fqn):
+            new_child = replacement_fn(child)
+            setattr(model, name, new_child)
+        else:
+            replace_with_custom_fn_if_matches_filter(
+                child, replacement_fn, filter_fn, new_fqn
+            )
+
+
+def _is_linear(mod, fqn):
+    return isinstance(mod, torch.nn.Linear)
+
+
+def swap_linear_with_mx_linear(model, elem_dtype, block_size, filter_fn=None):
+    if filter_fn is None:
+        combined_filter_fn = _is_linear
+    else:
+
+        def __fn(mod, fqn):
+            return _is_linear(mod, fqn) and filter_fn(mod, fqn)
+
+        combined_filter_fn = __fn
+    replace_with_custom_fn_if_matches_filter(
+        model,
+        lambda mod: MXLinear.from_float(mod, elem_dtype, block_size),
+        combined_filter_fn,
+    )
+
+
+def swap_linear_with_mx_inference_linear(
+    model,
+    elem_dtype,
+    block_size,
+    filter_fn=None,
+):
+    if filter_fn is None:
+        combined_filter_fn = _is_linear
+    else:
+
+        def __fn(mod, fqn):
+            return _is_linear(mod, fqn) and filter_fn(mod, fqn)
+
+        combined_filter_fn = __fn
+    replace_with_custom_fn_if_matches_filter(
+        model,
+        lambda mod: MXInferenceLinear.from_float(mod, elem_dtype, block_size),
+        combined_filter_fn,
+    )
diff --git a/torchao/prototype/mx_formats/mx_ops.py b/torchao/prototype/mx_formats/mx_ops.py
new file mode 100644
index 0000000000..7a404b89a8
--- /dev/null
+++ b/torchao/prototype/mx_formats/mx_ops.py
@@ -0,0 +1,158 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file defines the ops needed for our tensor subclass implementation
+of `MXTensor` to work naturally in PyTorch programs. For example, if
+the modeling code is written as
+
+  x_mx = MXTensor.to_mx(x, torch.float8_e4m3fn)
+  w_mx = MXTensor.to_mx(w, torch.float8_e4m3fn)
+  y = F.linear(x_mx, w_mx)
+
+then the ops in this file are used under the hood to properly route
+the underlying data fields to the MX matmul.
+"""
+
+from typing import Any, Dict
+
+import torch
+from torch.utils._pytree import tree_map
+
+from torchao.prototype.mx_formats.constants import DTYPE_FP4
+from torchao.prototype.mx_formats.mx_tensor import (  # noqa: E501
+    MXTensor,
+    tensor_size_hp_to_fp4x2,
+)
+
+aten = torch.ops.aten
+
+MX_OPS_TABLE: Dict[Any, Any] = {}
+
+
+def implements(aten_ops):
+    """Register aten ops to the mx op table"""
+
+    def decorator(func):
+        for op in aten_ops:
+            MX_OPS_TABLE[op] = func
+        return func
+
+    return decorator
+
+
+@implements([aten.detach.default])
+def mx_desugar_op(aten_op, args, kwargs=None):
+    old = args[0]
+    new_data = aten_op(old._data, *args[1:], **kwargs)
+    new = MXTensor(
+        old._scale_e8m0,
+        new_data,
+        old._elem_dtype,
+        old._block_size,
+        old._orig_dtype,
+    )
+    return new
+
+
+@implements([aten.mm.default, aten.matmul.default])
+def mx_mm(aten_op, args, kwargs=None):
+    a = args[0]
+    b = args[1]
+    assert isinstance(a, MXTensor) and isinstance(b, MXTensor)
+    a_hp = a.to_dtype(a._orig_dtype)
+    b_hp = b.to_dtype(b._orig_dtype)
+    res = aten_op(a_hp, b_hp)
+    return res
+
+
+@implements([aten.addmm.default])
+def mx_addmm(aten_op, args, kwargs=None):
+    a = args[0]
+    b = args[1]
+    c = args[2]
+    assert isinstance(b, MXTensor) and isinstance(c, MXTensor)
+    b_hp = b.to_dtype(b._orig_dtype)
+    c_hp = c.to_dtype(c._orig_dtype)
+    res = aten_op(a, b_hp, c_hp)
+    return res
+
+
+@implements([aten.t.default])
+def mx_t(aten_op, args, kwargs=None):
+    # For now, only transpose(input, 0, 1) is supported.
+    old = args[0]
+    new = MXTensor(
+        old._scale_e8m0,
+        old._data.t(),
+        old._elem_dtype,
+        old._block_size,
+        old._orig_dtype,
+    )
+    return new
+
+
+@implements([aten.sum.dim_IntList])
+def mx_cast_up_op(aten_op, args, kwargs=None):
+    """Be careful with this function, this is a "fallback" op that
+    casts any MX inputs of the op up to the original precision and then
+    performs the op in the original precision.
+
+    We currently need this to support the backward for the addmm bias.
+ "addmm" -> out + "hp_gradBias" <-"sum" <- "identity" <- gradOut <- "hp_gradOut" + """ + + def unwrap(x): + if isinstance(x, MXTensor): + return x.to_dtype(x._orig_dtype) + return x + + new_args = tree_map(unwrap, args) + new_kwargs = tree_map(unwrap, kwargs) + return aten_op(*new_args, **new_kwargs) + + +@implements([aten.view.default]) +def mx_view_op(aten_op, args, kwargs=None): + data = args[0]._data + new_size = args[1] + if args[0]._elem_dtype == DTYPE_FP4: + # special case fp4 as we pack two elements per byte + new_size = tensor_size_hp_to_fp4x2(new_size, data.is_contiguous()) + new_data = aten_op(data, new_size, *args[2:], **kwargs) + return MXTensor( + args[0]._scale_e8m0, + new_data, + args[0]._elem_dtype, + args[0]._block_size, + args[0]._orig_dtype, + ) + + +@implements([aten._to_copy.default]) +def autocast_to_copy(aten_op, args, kwargs=None): + """This gets called when running matmul under autocast + when the input is a MXTensor, presenting as a fp32 + tensor. + """ + assert isinstance(args[0], MXTensor) + # print('before', args[0], args[0].dtype, args[0]._orig_dtype) + assert ( + len(kwargs) == 1 and "dtype" in kwargs + ), "Only support dtype kwarg for autocast" + assert kwargs["dtype"] in { + torch.float16, + torch.bfloat16, + }, "Only support floating point conversion for autocast w/ MXTensor" + res = MXTensor( + args[0]._scale_e8m0, + args[0]._data, + args[0]._elem_dtype, + args[0]._block_size, + kwargs["dtype"], + ) + # print('after', res, res.dtype, res._orig_dtype) + return res diff --git a/torchao/prototype/mx_formats/mx_tensor.py b/torchao/prototype/mx_formats/mx_tensor.py new file mode 100644 index 0000000000..6efc2f5748 --- /dev/null +++ b/torchao/prototype/mx_formats/mx_tensor.py @@ -0,0 +1,416 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +Defines the tensor subclasses to represent the MX format spec from +https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf + +Exponent E8M0 encoding details (OCP spec section 5.4.1): + * bias: 127 + * supported exponent range: -127 to 127 + * infinities: N/A + * NaN: 11111111 + * Zeros: N/A +""" + +from typing import Dict, Union + +import torch + +import torchao.prototype.mx_formats.config as config +from torchao.prototype.mx_formats.constants import ( + BLOCK_SIZE_DEFAULT, + DTYPE_FP4, + DTYPE_FP6_E2M3, + DTYPE_FP6_E3M2, + E8M0_EXPONENT_BIAS, + E8M0_EXPONENT_NAN_VAL, + F32_MIN_NORMAL, + F4_E2M1_MAX, + F4_E2M1_MAX_POW2, + F6_E2M3_MAX, + F6_E2M3_MAX_POW2, + F6_E3M2_MAX, + F6_E3M2_MAX_POW2, + F8E4M3_MAX, + F8E4M3_MAX_POW2, + F8E5M2_MAX, + F8E5M2_MAX_POW2, + SUPPORTED_ELEM_DTYPES, +) + +from torchao.prototype.mx_formats.custom_cast import ( + f32_to_f4_unpacked, + f32_to_f6_e2m3_unpacked, + f32_to_f6_e3m2_unpacked, + f4_unpacked_to_f32, + f6_e2m3_unpacked_to_f32, + f6_e3m2_unpacked_to_f32, + pack_uint4, + triton_f4_to_scaled_bf16, + unpack_uint4, +) + + +def to_mx( + data_hp: torch.Tensor, + elem_dtype: Union[torch.dtype, str], + block_size: int, +): + """ + Takes a high precision tensor and converts to MX scale and raw data, in + naive layout (scale and raw data are separate tensors). 
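+
+    A sketch of the per-block math implemented below (it ignores the special
+    cases for all-zero blocks, NaNs and denormal scales, which the code
+    handles explicitly):
+
+        max_abs    = amax(abs(block))
+        scale_e8m0 = floor(log2(max_abs)) - target_max_pow2  # stored biased, in uint8
+        data_lp    = saturated_cast(block / 2.0 ** scale_e8m0, elem_dtype)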
+ """ + + assert data_hp.dtype in ( + torch.bfloat16, + torch.float, + ), f"{data_hp.dtype} is not supported yet" + # TODO(future PR): consider supporting padding + assert data_hp.numel() % block_size == 0, "unsupported" + assert data_hp.is_contiguous(), "unsupported" + assert elem_dtype in SUPPORTED_ELEM_DTYPES, "unsupported" + + # calculate the scale in e8m0 format + + orig_shape = data_hp.shape + data_hp = data_hp.reshape(-1, block_size) + + # find max value of the data + # Note: this only implements the `minimally supported` version of + # https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf + # section 6.3. + max_abs = torch.amax(torch.abs(data_hp), 1) + + # Add an epsilon to prevent the log2 function call for returning -inf + # where the values are zero. + eps = F32_MIN_NORMAL * (max_abs == 0).type(max_abs.dtype) + + # Find largest power of 2 less than or equal to max_abs. + largest_p2_lt_max_abs = torch.floor(torch.log2(max_abs + eps)) + + # Set X to be the largest power-of-two less than or equal to + # max_abs(v), divided by the largest power of two representable + # in the element data type + if elem_dtype == torch.float8_e4m3fn: + target_max_pow2 = F8E4M3_MAX_POW2 + elif elem_dtype == torch.float8_e5m2: + target_max_pow2 = F8E5M2_MAX_POW2 + elif elem_dtype == DTYPE_FP6_E2M3: + target_max_pow2 = F6_E2M3_MAX_POW2 + elif elem_dtype == DTYPE_FP6_E3M2: + target_max_pow2 = F6_E3M2_MAX_POW2 + elif elem_dtype == DTYPE_FP4: + target_max_pow2 = F4_E2M1_MAX_POW2 + else: + raise AssertionError("unsupported") + scale_e8m0_unbiased = largest_p2_lt_max_abs - target_max_pow2 + + # Clamp to exponents that can be represented in e8m0 + scale_e8m0_unbiased = torch.clamp( + scale_e8m0_unbiased, min=-E8M0_EXPONENT_BIAS, max=E8M0_EXPONENT_BIAS + ) + + # Create the biased e8m0 representation and cast it to 8 bits + scale_e8m0_biased = scale_e8m0_unbiased + E8M0_EXPONENT_BIAS + scale_e8m0_biased = scale_e8m0_biased.to(torch.uint8) + + # Conversion to torch.uint8 sets NaN values to 0, fix this by + # explicitly setting known NaN values to 255 + scale_e8m0_biased = torch.where( + torch.isnan(scale_e8m0_unbiased), + E8M0_EXPONENT_NAN_VAL, + scale_e8m0_biased, + ) + + # For now, calculate the scale in floating point. + # TODO(future) audit if there is a need to bit shift exponents instead. + scale_fp = torch.pow( + torch.full(max_abs.size(), 2.0, device=scale_e8m0_biased.device), + scale_e8m0_unbiased, + ) + + # Today, 2**-127 returns 0 in compile+inductor+triton because it is in the + # float32 denormal range. For now, manually adjust the fp scale. This is + # relevant if all of the incoming block values are zeroes. + # See https://github.com/pytorch/pytorch/issues/125557 for details. + # Note: it would be more correct to set the minimum to 2**-127, but this + # does not work in triton either as it looks like subnormal value handling + # has some gaps. So, for now just set to the minimum normal value. 
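+    # (Concretely: for an all-zeros block, max_abs == 0, so after the eps
+    # adjustment and clamping scale_e8m0_unbiased == -127, and 2.0 ** -127
+    # is a float32 denormal.)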
+ scale_fp = torch.clamp(scale_fp, min=F32_MIN_NORMAL) + + # scale and saturated cast the data elements to max of target dtype + if elem_dtype == torch.float8_e4m3fn: + max_pos = F8E4M3_MAX + elif elem_dtype == torch.float8_e5m2: + max_pos = F8E5M2_MAX + elif elem_dtype == DTYPE_FP6_E2M3: + max_pos = F6_E2M3_MAX + elif elem_dtype == DTYPE_FP6_E3M2: + max_pos = F6_E3M2_MAX + elif elem_dtype == DTYPE_FP4: + max_pos = F4_E2M1_MAX + else: + raise AssertionError("unsupported") + data_lp = torch.clamp( + data_hp / scale_fp.unsqueeze(1), min=-1 * max_pos, max=max_pos + ) + data_lp = data_lp.reshape(orig_shape) + + # cast to target dtype + if elem_dtype in (torch.float8_e4m3fn, torch.float8_e5m2): + data_lp = data_lp.to(elem_dtype) + elif elem_dtype == DTYPE_FP6_E2M3: + data_lp = f32_to_f6_e2m3_unpacked(data_lp) + elif elem_dtype == DTYPE_FP6_E3M2: + data_lp = f32_to_f6_e3m2_unpacked(data_lp) + elif elem_dtype == DTYPE_FP4: + data_lp = f32_to_f4_unpacked(data_lp) + data_lp = pack_uint4(data_lp) + else: + raise AssertionError("unsupported") + + return scale_e8m0_biased, data_lp + + +def get_fp_scale(scale_e8m0): + s_offset = scale_e8m0.to(torch.int16) - E8M0_EXPONENT_BIAS + # TODO(later): it would be nice if there was a way to do the 2^x operation + # in PyTorch without creating a tensor of twos + two = torch.full(s_offset.size(), 2.0, device=scale_e8m0.device) + # pow(two, s_offset) can be out of range of floating point formats. + # TODO(later): handle this for float16 if we decide to support float16 + # scales. + s_fp = torch.pow(two, s_offset) + + # If a block exponent was 255, set values of that block to NaN + s_fp = torch.where(scale_e8m0 != E8M0_EXPONENT_NAN_VAL, s_fp, float("nan")) + + return s_fp + + +def to_dtype(data_lp, scale_e8m0, elem_dtype, block_size, target_dtype): + orig_shape = data_lp.shape + is_transposed = not data_lp.is_contiguous() + # if the underlying data is transposed, convert to row major before + # unpacking and unscaling + if is_transposed: + data_lp = data_lp.t() + assert data_lp.is_contiguous() + orig_shape = (orig_shape[1], orig_shape[0]) + + if elem_dtype in (torch.float8_e4m3fn, torch.float8_e5m2): + data_hp = data_lp.to(target_dtype) + elif elem_dtype == DTYPE_FP6_E2M3: + data_hp = f6_e2m3_unpacked_to_f32(data_lp) + data_hp = data_hp.to(target_dtype) + elif elem_dtype == DTYPE_FP6_E3M2: + data_hp = f6_e3m2_unpacked_to_f32(data_lp) + data_hp = data_hp.to(target_dtype) + elif elem_dtype == DTYPE_FP4: + if config.use_fp4_custom_triton_dequant_kernel: + data_hp_rescaled = triton_f4_to_scaled_bf16( + data_lp, + scale_e8m0, + block_size, + ) + if is_transposed: + data_hp_rescaled = data_hp_rescaled.t() + return data_hp_rescaled.to(target_dtype) + else: + # fp4 + f4_unpacked = unpack_uint4(data_lp) + # for now we only have a cast to f32 + # TODO(future PR): add cast directly to bf16 + f32 = f4_unpacked_to_f32(f4_unpacked) + data_hp = f32.to(target_dtype) + # manually adjust shape to account for the unpacking + # TODO(future PR): clean up the shape code and remove the hack + # below + orig_shape = (*orig_shape[:-1], orig_shape[-1] * 2) + else: + raise AssertionError("unsupported") + + data_hp = data_hp.reshape(-1, block_size) + s_fp = get_fp_scale(scale_e8m0).reshape(-1, 1).to(target_dtype) + data_hp = data_hp * s_fp + data_hp = data_hp.reshape(orig_shape) + + # if we converted to row-major before unscaling convert back + if is_transposed: + data_hp = data_hp.t() + + return data_hp + + +def tensor_size_hp_to_fp4x2(orig_size, is_contiguous): + new_size = orig_size + if 
is_contiguous: + new_size = [*list(new_size[:-1]), new_size[-1] // 2] + else: + new_size = [new_size[0] // 2, *list(new_size[1:])] + return new_size + + +def tensor_size_fp4x2_to_hp(orig_size, is_contiguous): + new_size = orig_size + if is_contiguous: + new_size = [*list(new_size[:-1]), new_size[-1] * 2] + else: + new_size = [new_size[0] * 2, *list(new_size[1:])] + return new_size + + +@torch._dynamo.allow_in_graph +class ToMXConstrFunc(torch.autograd.Function): + """ + Differentiable cast to MX, no-op in backward + """ + + @staticmethod + def forward(ctx, data_hp, elem_dtype, block_size): + scale_e8m0_biased, data_lp = to_mx(data_hp, elem_dtype, block_size) + return MXTensor( + scale_e8m0_biased, data_lp, elem_dtype, block_size, data_hp.dtype + ) + + @staticmethod + def backward(ctx, g): + return g, None, None + + +@torch._dynamo.allow_in_graph +class FromMXConstrFunc(torch.autograd.Function): + """ + Differentiable cast from MX, no-op in backward + """ + + @staticmethod + def forward(ctx, tensor_lp, target_dtype): + return to_dtype( + tensor_lp._data, + tensor_lp._scale_e8m0, + tensor_lp._elem_dtype, + tensor_lp._block_size, + target_dtype, + ) + + @staticmethod + def backward(ctx, g): + return g, None, None + + +class MXTensor(torch.Tensor): + def __new__( + cls, + scale_e8m0_bits, + data_bits, + elem_dtype, + block_size, + orig_dtype, + ): + new_size = data_bits.size() + if elem_dtype == DTYPE_FP4: + # set the tensor size to what it would be without 2x4 packing + new_size = tensor_size_fp4x2_to_hp( + new_size, + data_bits.is_contiguous(), + ) + self = torch.Tensor._make_wrapper_subclass( + cls, + new_size, + dtype=orig_dtype, + device=data_bits.device, + ) + assert scale_e8m0_bits.dtype == torch.uint8, "unsupported" + assert len(scale_e8m0_bits.shape) == 1, "unsupported" + assert data_bits.dtype in ( + torch.float8_e4m3fn, + torch.float8_e5m2, + torch.uint8, + ), "unsupported" + if elem_dtype in ( + torch.float8_e4m3fn, + torch.float8_e5m2, + DTYPE_FP6_E2M3, + DTYPE_FP6_E3M2, + ): + target_numel = scale_e8m0_bits.numel() * block_size + elif elem_dtype == DTYPE_FP4: + assert data_bits.dtype is torch.uint8 # fp4 + target_numel = scale_e8m0_bits.numel() * block_size / 2 + else: + raise AssertionError("unsupported") + if not issubclass( + torch._subclasses.fake_tensor.FakeTensor, + type(data_bits), + ): + # this check is sometimes broken for FakeTensor + # TODO investigate + assert ( + target_numel == data_bits.numel() + ), f"{target_numel} != {data_bits.numel()}" + + # `_scale_e8m0` has rank 1 and applies to a row-major memory layout of + # `_data` + self._scale_e8m0 = scale_e8m0_bits + self._data = data_bits + self._elem_dtype = elem_dtype + self._block_size = block_size + self._orig_dtype = orig_dtype + return self + + def __repr__(self): + # TODO better elem dtype print for fp4 + return f"MXTensor: elem_dtype: {self._elem_dtype}, s_e8m0: {self._scale_e8m0}, d: {self._data}, d_hp: {self.to_dtype(self._orig_dtype)}" # noqa: E501 + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs=None): + # avoid circular dependency + from torchao.prototype.mx_formats.mx_ops import MX_OPS_TABLE + + if func in MX_OPS_TABLE: + return MX_OPS_TABLE[func](func, args, kwargs) + + raise NotImplementedError(f"{func} not implemented") + + def to_dtype(self, target_dtype): + return FromMXConstrFunc.apply(self, target_dtype) + + @staticmethod + @torch._dynamo.allow_in_graph + def to_mx( + data_hp: torch.Tensor, + elem_dtype: Union[torch.dtype, str], + block_size: int = BLOCK_SIZE_DEFAULT, + ): 
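+        """Cast a high precision tensor to MX; this is differentiable, and
+        the backward pass of this cast is a no-op (see `ToMXConstrFunc`
+        above)."""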
+ return ToMXConstrFunc.apply(data_hp, elem_dtype, block_size) + + def __tensor_flatten__(self): + ctx = { + "_elem_dtype": self._elem_dtype, + "_block_size": self._block_size, + "_orig_dtype": self._orig_dtype, + } + return ["_scale_e8m0", "_data"], ctx + + @staticmethod + def __tensor_unflatten__( + inner_tensors: Dict, + metadata, + outer_size, + outer_stride, + ): + return MXTensor( + inner_tensors["_scale_e8m0"], + inner_tensors["_data"], + metadata["_elem_dtype"], + metadata["_block_size"], + metadata["_orig_dtype"], + ) + + # Do not force the MXTensor type on the returned tensor + __torch_function__ = torch._C._disabled_torch_function_impl diff --git a/torchao/utils.py b/torchao/utils.py index fcf853da3e..2835129f7a 100644 --- a/torchao/utils.py +++ b/torchao/utils.py @@ -1,4 +1,5 @@ import torch +import torch.utils.benchmark as benchmark def benchmark_model(model, num_runs, input_tensor): @@ -40,3 +41,17 @@ def wrapper(*args, **kwargs): return test_func(*args, **kwargs) return wrapper return decorator + + +def benchmark_torch_function_in_microseconds(f, *args, **kwargs): + # Manual warmup + + f(*args, **kwargs) + f(*args, **kwargs) + + t0 = benchmark.Timer( + stmt="f(*args, **kwargs)", + globals={"args": args, "kwargs": kwargs, "f": f}, # noqa: E501 + ) + measurement = t0.blocked_autorange() + return measurement.mean * 1e6 From 42c23768d379e7d5acd8af0d84ee2a7672a66fcd Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Tue, 28 May 2024 14:02:22 -0700 Subject: [PATCH 61/61] Factor out the specific configurations to helper functions (#286) Summary: int4wo, int8wo, int8dyn, 8da4w are specific configurations for quantize function, we factor that out in the PR so they are easy to use Test Plan: python test/quantization/test_quant_api.py Reviewers: Subscribers: Tasks: Tags: --- test/quantization/test_quant_api.py | 100 +++---------------------- torchao/quantization/quant_api.py | 110 ++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+), 91 deletions(-) diff --git a/test/quantization/test_quant_api.py b/test/quantization/test_quant_api.py index 70c2562bb3..9aae14dd83 100644 --- a/test/quantization/test_quant_api.py +++ b/test/quantization/test_quant_api.py @@ -37,6 +37,10 @@ Quantizer, TwoStepQuantizer, quantize, + get_apply_8da4w_quant, + get_apply_int4wo_quant, + get_apply_int8wo_quant, + get_apply_int8dyn_quant, ) from torchao.quantization.utils import ( TORCH_VERSION_AFTER_2_3, @@ -416,42 +420,11 @@ def test_eval_wrapper(self): # TODO: move to a separate test file @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") def test_quantized_tensor_subclass_8da4w(self): - # weight settings groupsize = 32 - mapping_type = MappingType.SYMMETRIC - block_size = (1, groupsize) - target_dtype = torch.int8 - eps = torch.finfo(torch.float32).eps - quant_min = -8 - quant_max = 7 - - # TODO: make a general helper function? 
- # input settings - def get_per_token_block_size(x): - block_size = [] - for i in range(len(x.shape)-1): - block_size.append(1) - block_size.append(x.shape[-1]) - return block_size - - # input settings - input_mapping_type = MappingType.ASYMMETRIC - input_target_dtype = torch.int8 - input_quant_func = lambda x: to_aq(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype) - m = ToyLinearModel().eval() m_copy = copy.deepcopy(m) example_inputs = m.example_inputs() - - def apply_weight_quant(weight): - return to_aq(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps) - - def apply_act_quant(weight): - return to_laq(weight, input_quant_func) - - # note: order is important - m = quantize(m, apply_weight_quant) - m = quantize(m, apply_act_quant) + m = quantize(m, get_apply_8da4w_quant(groupsize=groupsize)) assert isinstance(m.linear1.weight, LinearActQuantizedTensor) assert isinstance(m.linear2.weight, LinearActQuantizedTensor) @@ -474,27 +447,13 @@ def apply_act_quant(weight): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_quantized_tensor_subclass_int4(self): - # weight settings - groupsize = 32 - mapping_type = MappingType.ASYMMETRIC - block_size = (1, groupsize) - target_dtype = torch.int32 - quant_min = 0 - quant_max = 15 - eps = 1e-6 - preserve_zero = False - zero_point_dtype = torch.bfloat16 - zero_point_domain = ZeroPointDomain.FLOAT - # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") m_copy = copy.deepcopy(m) example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs())) - def apply_weight_quant(weight): - return to_aq(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=zero_point_domain) - - m = quantize(m, apply_weight_quant) + groupsize = 32 + m = quantize(m, get_apply_int4wo_quant(groupsize=groupsize)) assert isinstance(m.linear1.weight, AffineQuantizedTensor) assert isinstance(m.linear2.weight, AffineQuantizedTensor) @@ -511,21 +470,11 @@ def apply_weight_quant(weight): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_quantized_tensor_subclass_int8(self): - # weight settings - mapping_type = MappingType.SYMMETRIC - target_dtype = torch.int8 - eps = torch.finfo(torch.float32).eps - zero_point_dtype = torch.int64 - m = ToyLinearModel().eval().to(torch.bfloat16) m_copy = copy.deepcopy(m) example_inputs = tuple(map(lambda x: x.to(torch.bfloat16), m.example_inputs())) - def apply_weight_quant(weight): - block_size = (1, weight.shape[1]) - return to_aq(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) - - m = quantize(m, apply_weight_quant) + m = quantize(m, get_apply_int8wo_quant()) assert isinstance(m.linear1.weight, AffineQuantizedTensor) assert isinstance(m.linear2.weight, AffineQuantizedTensor) @@ -543,43 +492,12 @@ def apply_weight_quant(weight): @unittest.skipIf(not TORCH_VERSION_AFTER_2_4, "Test only enabled for 2.4+") @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available") def test_quantized_tensor_subclass_int8_dyn_quant(self): - # weight settings - mapping_type = MappingType.SYMMETRIC - def get_weight_block_size(x): - return (1, x.shape[1]) - target_dtype = torch.int8 - eps = 
torch.finfo(torch.float32).eps - zero_point_dtype = torch.int64 - - # input settings - def get_per_token_block_size(x): - block_size = list(x.shape) - for i in range(len(block_size)-1): - block_size[i] = 1 - return block_size - - input_mapping_type = MappingType.SYMMETRIC - input_target_dtype = torch.int8 - input_eps = 1e-5 - input_quant_min = -127 - input_quant_max = 127 - input_quant_func = lambda x: to_aq(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None) - # use 1024 so that we don't need padding m = ToyLinearModel(1024, 1024, 1024).eval().to(torch.bfloat16).to("cuda") m_copy = copy.deepcopy(m) # setting batch_size to 20 to be compatible with the kernel example_inputs = tuple(map(lambda x: x.to(torch.bfloat16).to("cuda"), m.example_inputs(batch_size=20))) - - def apply_weight_quant(weight): - block_size = get_weight_block_size(weight) - return to_aq(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype) - - def apply_act_quant(weight): - return to_laq(weight, input_quant_func) - - m = quantize(m, apply_weight_quant) - m = quantize(m, apply_act_quant) + m = quantize(m, get_apply_int8dyn_quant()) assert isinstance(m.linear1.weight, LinearActQuantizedTensor) assert isinstance(m.linear2.weight, LinearActQuantizedTensor) diff --git a/torchao/quantization/quant_api.py b/torchao/quantization/quant_api.py index d9b731bace..02678ab2cd 100644 --- a/torchao/quantization/quant_api.py +++ b/torchao/quantization/quant_api.py @@ -32,6 +32,12 @@ Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, QuantizedLinearWeightBase, + to_laq, +) + +from .quant_primitives import ( + MappingType, + ZeroPointDomain, ) from .weight_only import WeightOnlyInt8QuantLinear from .unified import Quantizer, TwoStepQuantizer @@ -56,6 +62,10 @@ "quantize", "autoquant", "_get_subclass_inserter", + "get_apply_8da4w_quant", + "get_apply_int4wo_quant", + "get_apply_int8wo_quant", + "get_apply_int8dyn_quant", ] if TORCH_VERSION_AFTER_2_3: @@ -287,3 +297,103 @@ def filter_fn(module, fqn): _is_linear if filter_fn is None else filter_fn, ) return model + +def get_apply_8da4w_quant(groupsize=32): + + def apply_8da4w_quant(weight): + # avoid circular dep + from torchao.dtypes.aqt import to_aq + + # weight settings + mapping_type = MappingType.SYMMETRIC + block_size = (1, groupsize) + target_dtype = torch.int8 + eps = torch.finfo(torch.float32).eps + quant_min = -8 + quant_max = 7 + + # TODO: make a general helper function? 
+        # input settings
+        def get_per_token_block_size(x):
+            block_size = []
+            for i in range(len(x.shape)-1):
+                block_size.append(1)
+            block_size.append(x.shape[-1])
+            return block_size
+
+        input_mapping_type = MappingType.ASYMMETRIC
+        input_target_dtype = torch.int8
+        input_quant_func = lambda x: to_aq(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype)
+
+        weight = to_aq(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps)
+        weight = to_laq(weight, input_quant_func)
+        return weight
+
+    return apply_8da4w_quant
+
+
+def get_apply_int4wo_quant(groupsize=32):
+    def apply_int4wo_quant(weight):
+        # avoid circular dep
+        from torchao.dtypes.aqt import to_aq
+
+        mapping_type = MappingType.ASYMMETRIC
+        block_size = (1, groupsize)
+        target_dtype = torch.int32
+        quant_min = 0
+        quant_max = 15
+        eps = 1e-6
+        preserve_zero = False
+        zero_point_dtype = torch.bfloat16
+        zero_point_domain = ZeroPointDomain.FLOAT
+        return to_aq(weight, mapping_type, block_size, target_dtype, quant_min, quant_max, eps, zero_point_dtype=zero_point_dtype, preserve_zero=preserve_zero, zero_point_domain=zero_point_domain)
+
+    return apply_int4wo_quant
+
+
+def get_apply_int8wo_quant():
+    def apply_int8wo_quant(weight):
+        # avoid circular dep
+        from torchao.dtypes.aqt import to_aq
+
+        mapping_type = MappingType.SYMMETRIC
+        target_dtype = torch.int8
+        eps = torch.finfo(torch.float32).eps
+        zero_point_dtype = torch.int64
+        block_size = (1, weight.shape[1])
+        return to_aq(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype)
+    return apply_int8wo_quant
+
+def get_apply_int8dyn_quant():
+    def apply_int8dyn_quant(weight):
+        # avoid circular dep
+        from torchao.dtypes.aqt import to_aq
+        # weight settings
+        mapping_type = MappingType.SYMMETRIC
+        def get_weight_block_size(x):
+            return (1, x.shape[1])
+        target_dtype = torch.int8
+        eps = torch.finfo(torch.float32).eps
+        zero_point_dtype = torch.int64
+
+        # input settings
+        def get_per_token_block_size(x):
+            block_size = list(x.shape)
+            for i in range(len(block_size)-1):
+                block_size[i] = 1
+            return block_size
+
+        input_mapping_type = MappingType.SYMMETRIC
+        input_target_dtype = torch.int8
+        input_eps = 1e-5
+        input_quant_min = -127
+        input_quant_max = 127
+        input_quant_func = lambda x: to_aq(x, input_mapping_type, get_per_token_block_size(x), input_target_dtype, eps=input_eps, quant_min=input_quant_min, quant_max=input_quant_max, scale_dtype=torch.float32 if x.dtype == torch.float16 else None)
+
+        block_size = get_weight_block_size(weight)
+        weight = to_aq(weight, mapping_type, block_size, target_dtype, eps=eps, zero_point_dtype=zero_point_dtype)
+        weight = to_laq(weight, input_quant_func)
+        return weight
+    return apply_int8dyn_quant
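
To illustrate the factored-out API, a minimal sketch (the Sequential model is
a stand-in for any model containing Linear layers; the import path assumes
these helpers are consumed the same way as in test_quant_api.py above):

    import torch
    from torchao.quantization.quant_api import quantize, get_apply_int4wo_quant

    # 1024 matches the test models above, avoiding the need for padding
    m = torch.nn.Sequential(torch.nn.Linear(1024, 1024)).eval().to(torch.bfloat16).to("cuda")
    m = quantize(m, get_apply_int4wo_quant(groupsize=32))
    y = m(torch.randn(1, 1024, dtype=torch.bfloat16, device="cuda"))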